From 2a2c694758d6a48125cc9adf446f2054b52db201 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Mon, 10 Mar 2014 16:11:03 -0400 Subject: [PATCH 001/219] registry: make certain headers optional For a pull-only, static registry, there only a couple of headers that need to be optional (that are presently required. * X-Docker-Registry-Version * X-Docker-Size * X-Docker-Endpoints Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- registry/registry.go | 53 +++++++++++++++++++++++++++++++------------- 1 file changed, 38 insertions(+), 15 deletions(-) diff --git a/registry/registry.go b/registry/registry.go index dbf5d539ff..30079e9aa9 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -25,12 +25,8 @@ var ( errLoginRequired = errors.New("Authentication is required.") ) -func pingRegistryEndpoint(endpoint string) (bool, error) { - if endpoint == IndexServerAddress() { - // Skip the check, we now this one is valid - // (and we never want to fallback to http in case of error) - return false, nil - } +// reuse this chunk of code +func newClient() *http.Client { httpDial := func(proto string, addr string) (net.Conn, error) { // Set the connect timeout to 5 seconds conn, err := net.DialTimeout(proto, addr, time.Duration(5)*time.Second) @@ -42,17 +38,39 @@ func pingRegistryEndpoint(endpoint string) (bool, error) { return conn, nil } httpTransport := &http.Transport{Dial: httpDial} - client := &http.Client{Transport: httpTransport} + return &http.Client{Transport: httpTransport} +} + +// Have an API to access the version of the registry +func getRegistryVersion(endpoint string) (string, error) { + + client := newClient() + resp, err := client.Get(endpoint + "_version") + if err != nil { + return "", err + } + defer resp.Body.Close() + + if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + return hdr, nil + } + versionBody, err := ioutil.ReadAll(resp.Body) + return string(versionBody), err +} + +func pingRegistryEndpoint(endpoint string) (bool, error) { + if endpoint == IndexServerAddress() { + // Skip the check, we now this one is valid + // (and we never want to fallback to http in case of error) + return false, nil + } + client := newClient() resp, err := client.Get(endpoint + "_ping") if err != nil { return false, err } defer resp.Body.Close() - if resp.Header.Get("X-Docker-Registry-Version") == "" { - return false, errors.New("This does not look like a Registry server (\"X-Docker-Registry-Version\" header not found in the response)") - } - standalone := resp.Header.Get("X-Docker-Registry-Standalone") utils.Debugf("Registry standalone header: '%s'", standalone) // If the header is absent, we assume true for compatibility with earlier @@ -223,9 +241,13 @@ func (r *Registry) GetRemoteImageJSON(imgID, registry string, token []string) ([ return nil, -1, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } - imageSize, err := strconv.Atoi(res.Header.Get("X-Docker-Size")) - if err != nil { - return nil, -1, err + // if the size header is not present, then set it to '-1' + imageSize := -1 + if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { + imageSize, err = strconv.Atoi(hdr) + if err != nil { + return nil, -1, err + } } jsonString, err := ioutil.ReadAll(res.Body) @@ -336,7 +358,8 @@ func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) { endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, ep)) } } else { - return nil, fmt.Errorf("Index response didn't contain any endpoints") + // Assume the 
endpoint is on the same host + endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, req.URL.Host)) } checksumsJSON, err := ioutil.ReadAll(res.Body) From 2b855afaeedcab3117876815ec2f8d4450a742b5 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Tue, 11 Mar 2014 23:36:51 -0400 Subject: [PATCH 002/219] registry: Info collection roll version and standalone information into the _ping. And to support Headers they are checked after the JSON is loaded (if there is anything to load). To stay backwards compatible, if the _ping contents are not able to unmarshal to RegistryInfo, do not stop, but continue with the same behavior. Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- registry/registry.go | 84 +++++++++++++++++++++++--------------------- 1 file changed, 43 insertions(+), 41 deletions(-) diff --git a/registry/registry.go b/registry/registry.go index 30079e9aa9..6040d75003 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -25,8 +25,12 @@ var ( errLoginRequired = errors.New("Authentication is required.") ) -// reuse this chunk of code -func newClient() *http.Client { +func pingRegistryEndpoint(endpoint string) (RegistryInfo, error) { + if endpoint == IndexServerAddress() { + // Skip the check, we now this one is valid + // (and we never want to fallback to http in case of error) + return RegistryInfo{Standalone: false}, nil + } httpDial := func(proto string, addr string) (net.Conn, error) { // Set the connect timeout to 5 seconds conn, err := net.DialTimeout(proto, addr, time.Duration(5)*time.Second) @@ -38,51 +42,44 @@ func newClient() *http.Client { return conn, nil } httpTransport := &http.Transport{Dial: httpDial} - return &http.Client{Transport: httpTransport} -} - -// Have an API to access the version of the registry -func getRegistryVersion(endpoint string) (string, error) { - - client := newClient() - resp, err := client.Get(endpoint + "_version") - if err != nil { - return "", err - } - defer resp.Body.Close() - - if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { - return hdr, nil - } - versionBody, err := ioutil.ReadAll(resp.Body) - return string(versionBody), err -} - -func pingRegistryEndpoint(endpoint string) (bool, error) { - if endpoint == IndexServerAddress() { - // Skip the check, we now this one is valid - // (and we never want to fallback to http in case of error) - return false, nil - } - client := newClient() + client := &http.Client{Transport: httpTransport} resp, err := client.Get(endpoint + "_ping") if err != nil { - return false, err + return RegistryInfo{Standalone: false}, err } defer resp.Body.Close() + jsonString, err := ioutil.ReadAll(resp.Body) + if err != nil { + return RegistryInfo{Standalone: false}, fmt.Errorf("Error while reading the http response: %s", err) + } + + // If the header is absent, we assume true for compatibility with earlier + // versions of the registry. default to true + info := RegistryInfo{ + Standalone: true, + } + if err := json.Unmarshal(jsonString, &info); err != nil { + utils.Debugf("Error unmarshalling the _ping RegistryInfo: %s", err) + // don't stop here. 
Just assume sane defaults + } + if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + utils.Debugf("Registry version header: '%s'", hdr) + info.Version = hdr + } + utils.Debugf("RegistryInfo.Version: %q", info.Version) + standalone := resp.Header.Get("X-Docker-Registry-Standalone") utils.Debugf("Registry standalone header: '%s'", standalone) - // If the header is absent, we assume true for compatibility with earlier - // versions of the registry - if standalone == "" { - return true, nil - // Accepted values are "true" (case-insensitive) and "1". - } else if strings.EqualFold(standalone, "true") || standalone == "1" { - return true, nil + // Accepted values are "true" (case-insensitive) and "1". + if strings.EqualFold(standalone, "true") || standalone == "1" { + info.Standalone = true + } else if len(standalone) > 0 { + // there is a header set, and it is not "true" or "1", so assume fails + info.Standalone = false } - // Otherwise, not standalone - return false, nil + utils.Debugf("RegistryInfo.Standalone: %q", info.Standalone) + return info, nil } func validateRepositoryName(repositoryName string) error { @@ -688,6 +685,11 @@ type ImgData struct { Tag string `json:",omitempty"` } +type RegistryInfo struct { + Version string `json:"version"` + Standalone bool `json:"standalone"` +} + type Registry struct { client *http.Client authConfig *AuthConfig @@ -716,11 +718,11 @@ func NewRegistry(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, inde // If we're working with a standalone private registry over HTTPS, send Basic Auth headers // alongside our requests. if indexEndpoint != IndexServerAddress() && strings.HasPrefix(indexEndpoint, "https://") { - standalone, err := pingRegistryEndpoint(indexEndpoint) + info, err := pingRegistryEndpoint(indexEndpoint) if err != nil { return nil, err } - if standalone { + if info.Standalone { utils.Debugf("Endpoint %s is eligible for private registry registry. 
Enabling decorator.", indexEndpoint) dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password) factory.AddDecorator(dec) From 720f3447046355329b5ba5d850caca84328182d5 Mon Sep 17 00:00:00 2001 From: shin- Date: Mon, 14 Apr 2014 20:32:47 +0200 Subject: [PATCH 003/219] Added support for multiple endpoints in X-Docker-Endpoints header Docker-DCO-1.1-Signed-off-by: Joffrey F (github: shin-) --- registry/registry.go | 33 +++++++++++++++++++++++++-------- registry/registry_mock_test.go | 2 +- registry/registry_test.go | 15 ++++++++++++++- 3 files changed, 40 insertions(+), 10 deletions(-) diff --git a/registry/registry.go b/registry/registry.go index 817c08afa9..3656032e92 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -297,6 +297,25 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [ return nil, fmt.Errorf("Could not reach any registry endpoint") } +func buildEndpointsList(headers []string, indexEp string) ([]string, error) { + var endpoints []string + parsedUrl, err := url.Parse(indexEp) + if err != nil { + return nil, err + } + var urlScheme = parsedUrl.Scheme + // The Registry's URL scheme has to match the Index' + for _, ep := range headers { + epList := strings.Split(ep, ",") + for _, epListElement := range epList { + endpoints = append( + endpoints, + fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) + } + } + return endpoints, nil +} + func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) { indexEp := r.indexEndpoint repositoryTarget := fmt.Sprintf("%srepositories/%s/images", indexEp, remote) @@ -332,11 +351,10 @@ func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) { } var endpoints []string - var urlScheme = indexEp[:strings.Index(indexEp, ":")] if res.Header.Get("X-Docker-Endpoints") != "" { - // The Registry's URL scheme has to match the Index' - for _, ep := range res.Header["X-Docker-Endpoints"] { - endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, ep)) + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], indexEp) + if err != nil { + return nil, err } } else { return nil, fmt.Errorf("Index response didn't contain any endpoints") @@ -565,7 +583,6 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat } var tokens, endpoints []string - var urlScheme = indexEp[:strings.Index(indexEp, ":")] if !validate { if res.StatusCode != 200 && res.StatusCode != 201 { errBody, err := ioutil.ReadAll(res.Body) @@ -582,9 +599,9 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat } if res.Header.Get("X-Docker-Endpoints") != "" { - // The Registry's URL scheme has to match the Index' - for _, ep := range res.Header["X-Docker-Endpoints"] { - endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, ep)) + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], indexEp) + if err != nil { + return nil, err } } else { return nil, fmt.Errorf("Index response didn't contain any endpoints") diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go index dd5da6bd50..6b00751318 100644 --- a/registry/registry_mock_test.go +++ b/registry/registry_mock_test.go @@ -291,7 +291,7 @@ func handlerUsers(w http.ResponseWriter, r *http.Request) { func handlerImages(w http.ResponseWriter, r *http.Request) { u, _ := url.Parse(testHttpServer.URL) - w.Header().Add("X-Docker-Endpoints", u.Host) + w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", 
u.Host, "test.example.com")) w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) if r.Method == "PUT" { if strings.HasSuffix(r.URL.Path, "images") { diff --git a/registry/registry_test.go b/registry/registry_test.go index c072da41c5..ad64fb1f4c 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -1,7 +1,9 @@ package registry import ( + "fmt" "github.com/dotcloud/docker/utils" + "net/url" "strings" "testing" ) @@ -99,12 +101,23 @@ func TestGetRemoteTags(t *testing.T) { func TestGetRepositoryData(t *testing.T) { r := spawnTestRegistry(t) + parsedUrl, err := url.Parse(makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + host := "http://" + parsedUrl.Host + "/v1/" data, err := r.GetRepositoryData("foo42/bar") if err != nil { t.Fatal(err) } assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList") - assertEqual(t, len(data.Endpoints), 1, "Expected one endpoint in Endpoints") + assertEqual(t, len(data.Endpoints), 2, + fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints))) + assertEqual(t, data.Endpoints[0], host, + fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0])) + assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/", + fmt.Sprintf("Expected first endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1])) + } func TestPushImageJSONRegistry(t *testing.T) { From 611a1d711bc382fb0dafafbdea5a5091383de972 Mon Sep 17 00:00:00 2001 From: Jack Danger Canty Date: Sat, 19 Apr 2014 23:25:48 -0700 Subject: [PATCH 004/219] typo fix: 'methid' -> 'method' --- engine/http.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/http.go b/engine/http.go index c0418bcfb0..7e4dcd7bb4 100644 --- a/engine/http.go +++ b/engine/http.go @@ -9,7 +9,7 @@ import ( // result as an http response. // This method allows an Engine instance to be passed as a standard http.Handler interface. // -// Note that the protocol used in this methid is a convenience wrapper and is not the canonical +// Note that the protocol used in this method is a convenience wrapper and is not the canonical // implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing, // and so cannot differentiate stdout from stderr. Additionally, headers cannot be added to a response // once data has been written to the body, which makes it inconvenient to return metadata such From cf997aa905c5c6f5a29fa3658d904ffc81a1a4a1 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Thu, 17 Apr 2014 20:42:57 -0700 Subject: [PATCH 005/219] container: Remove Ghost state container.Register() checks both IsRunning() and IsGhost(), but at this point IsGhost() is always true if IsRunning() is true. For a newly created container both are false, and for a restored-from-disk container Daemon.load() sets Ghost to true if IsRunning is true. So we just drop the IsGhost check. This was the last call to IsGhost, so we remove It and all other traces of the ghost state. 
Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- daemon/daemon.go | 52 ++++++++++++++++++++---------------------------- daemon/state.go | 19 ------------------ 2 files changed, 22 insertions(+), 49 deletions(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index 5ed1ea3c16..0e4d1a1699 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -134,9 +134,6 @@ func (daemon *Daemon) load(id string) (*Container, error) { if container.ID != id { return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) } - if container.State.IsRunning() { - container.State.SetGhost(true) - } return container, nil } @@ -171,35 +168,32 @@ func (daemon *Daemon) Register(container *Container) error { // if so, then we need to restart monitor and init a new lock // If the container is supposed to be running, make sure of it if container.State.IsRunning() { - if container.State.IsGhost() { - utils.Debugf("killing ghost %s", container.ID) + utils.Debugf("killing old running container %s", container.ID) - existingPid := container.State.Pid - container.State.SetGhost(false) - container.State.SetStopped(0) + existingPid := container.State.Pid + container.State.SetStopped(0) - // We only have to handle this for lxc because the other drivers will ensure that - // no ghost processes are left when docker dies - if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") { - lxc.KillLxc(container.ID, 9) - } else { - // use the current driver and ensure that the container is dead x.x - cmd := &execdriver.Command{ - ID: container.ID, - } - var err error - cmd.Process, err = os.FindProcess(existingPid) - if err != nil { - utils.Debugf("cannot find existing process for %d", existingPid) - } - daemon.execDriver.Terminate(cmd) + // We only have to handle this for lxc because the other drivers will ensure that + // no processes are left when docker dies + if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") { + lxc.KillLxc(container.ID, 9) + } else { + // use the current driver and ensure that the container is dead x.x + cmd := &execdriver.Command{ + ID: container.ID, } - if err := container.Unmount(); err != nil { - utils.Debugf("ghost unmount error %s", err) - } - if err := container.ToDisk(); err != nil { - utils.Debugf("saving ghost state to disk %s", err) + var err error + cmd.Process, err = os.FindProcess(existingPid) + if err != nil { + utils.Debugf("cannot find existing process for %d", existingPid) } + daemon.execDriver.Terminate(cmd) + } + if err := container.Unmount(); err != nil { + utils.Debugf("unmount error %s", err) + } + if err := container.ToDisk(); err != nil { + utils.Debugf("saving stopped state to disk %s", err) } info := daemon.execDriver.Info(container.ID) @@ -211,8 +205,6 @@ func (daemon *Daemon) Register(container *Container) error { utils.Debugf("restart unmount error %s", err) } - container.State.SetGhost(false) - container.State.SetStopped(0) if err := container.Start(); err != nil { return err } diff --git a/daemon/state.go b/daemon/state.go index aabb5e43ba..562929c87a 100644 --- a/daemon/state.go +++ b/daemon/state.go @@ -14,7 +14,6 @@ type State struct { ExitCode int StartedAt time.Time FinishedAt time.Time - Ghost bool } // String returns a human-readable description of the state @@ -23,9 +22,6 @@ func (s *State) String() string { defer s.RUnlock() if s.Running { - if s.Ghost { - return fmt.Sprintf("Ghost") - } return fmt.Sprintf("Up %s", utils.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) } if 
s.FinishedAt.IsZero() { @@ -41,13 +37,6 @@ func (s *State) IsRunning() bool { return s.Running } -func (s *State) IsGhost() bool { - s.RLock() - defer s.RUnlock() - - return s.Ghost -} - func (s *State) GetExitCode() int { s.RLock() defer s.RUnlock() @@ -55,19 +44,11 @@ func (s *State) GetExitCode() int { return s.ExitCode } -func (s *State) SetGhost(val bool) { - s.Lock() - defer s.Unlock() - - s.Ghost = val -} - func (s *State) SetRunning(pid int) { s.Lock() defer s.Unlock() s.Running = true - s.Ghost = false s.ExitCode = 0 s.Pid = pid s.StartedAt = time.Now().UTC() From 2c4cebe916299c7485e929e49ebc6d263e3769cf Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Tue, 22 Apr 2014 06:26:44 -0600 Subject: [PATCH 006/219] Move "possible config locations" list to the top of check-config.sh Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- contrib/check-config.sh | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/contrib/check-config.sh b/contrib/check-config.sh index cbdb90bcce..498ede8af3 100755 --- a/contrib/check-config.sh +++ b/contrib/check-config.sh @@ -4,7 +4,13 @@ set -e # bits of this were adapted from lxc-checkconfig # see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in -: ${CONFIG:=/proc/config.gz} +possibleConfigs=( + '/proc/config.gz' + "/boot/config-$(uname -r)" + "/usr/src/linux-$(uname -r)/.config" + '/usr/src/linux/.config' +) +: ${CONFIG:="${possibleConfigs[0]}"} if ! command -v zgrep &> /dev/null; then zgrep() { @@ -74,11 +80,7 @@ check_flags() { if [ ! -e "$CONFIG" ]; then wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..." - for tryConfig in \ - '/proc/config.gz' \ - "/boot/config-$(uname -r)" \ - '/usr/src/linux/.config' \ - ; do + for tryConfig in "${possibleConfigs[@]}"; do if [ -e "$tryConfig" ]; then CONFIG="$tryConfig" break From 4353f25e10d19f414bdd5b20488824fb2b834d6e Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Tue, 22 Apr 2014 16:51:06 -0700 Subject: [PATCH 007/219] engine.Installer: a standard interface for "installable" services Installer is a standard interface for objects which can "install" themselves an engine by registering handlers. This can be used as an entrypoint for external plugins etc. Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- engine/engine.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/engine/engine.go b/engine/engine.go index 6a54b3591e..7e1f192a2b 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -12,6 +12,13 @@ import ( "strings" ) +// Installer is a standard interface for objects which can "install" themselves +// on an engine by registering handlers. +// This can be used as an entrypoint for external plugins etc. 
+type Installer interface { + Install(*Engine) error +} + type Handler func(*Job) Status var globalHandlers map[string]Handler From 672edfe807c597a1c245bce996a150dfdf273a3c Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 14 Mar 2014 16:53:43 -0700 Subject: [PATCH 008/219] Remove the concept of a root dir out of engine This makes the engine more general purpose so that we can use it and the job routing functionality for reexec'ing our binary Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) Conflicts: integration/runtime_test.go --- docker/docker.go | 30 +++++++++++++++++++++++++- engine/engine.go | 42 +++---------------------------------- engine/engine_test.go | 42 ------------------------------------- engine/helpers_test.go | 7 +------ engine/job_test.go | 6 ------ integration/runtime_test.go | 7 +++---- integration/server_test.go | 2 +- integration/utils_test.go | 2 +- 8 files changed, 38 insertions(+), 100 deletions(-) diff --git a/docker/docker.go b/docker/docker.go index a4cbe8d865..aed3b0778d 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "log" "os" + "runtime" "strings" "github.com/dotcloud/docker/api" @@ -121,7 +122,10 @@ func main() { } } - eng, err := engine.New(realRoot) + if err := checkKernelAndArch(); err != nil { + log.Fatal(err) + } + eng, err := engine.New() if err != nil { log.Fatal(err) } @@ -239,3 +243,27 @@ func main() { func showVersion() { fmt.Printf("Docker version %s, build %s\n", dockerversion.VERSION, dockerversion.GITCOMMIT) } + +func checkKernelAndArch() error { + // Check for unsupported architectures + if runtime.GOARCH != "amd64" { + return fmt.Errorf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH) + } + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. + // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.8 crashes are clearer. + // For details see http://github.com/dotcloud/docker/issues/407 + if k, err := utils.GetKernelVersion(); err != nil { + log.Printf("WARNING: %s\n", err) + } else { + if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 { + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String()) + } + } + } + return nil +} diff --git a/engine/engine.go b/engine/engine.go index 6a54b3591e..9cc1bd5004 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -5,9 +5,7 @@ import ( "fmt" "github.com/dotcloud/docker/utils" "io" - "log" "os" - "runtime" "sort" "strings" ) @@ -37,7 +35,6 @@ func unregister(name string) { // It acts as a store for *containers*, and allows manipulation of these // containers by executing *jobs*. 
type Engine struct { - root string handlers map[string]Handler hack Hack // data for temporary hackery (see hack.go) id string @@ -47,10 +44,6 @@ type Engine struct { Logging bool } -func (eng *Engine) Root() string { - return eng.root -} - func (eng *Engine) Register(name string, handler Handler) error { _, exists := eng.handlers[name] if exists { @@ -60,38 +53,9 @@ func (eng *Engine) Register(name string, handler Handler) error { return nil } -// New initializes a new engine managing the directory specified at `root`. -// `root` is used to store containers and any other state private to the engine. -// Changing the contents of the root without executing a job will cause unspecified -// behavior. -func New(root string) (*Engine, error) { - // Check for unsupported architectures - if runtime.GOARCH != "amd64" { - return nil, fmt.Errorf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH) - } - // Check for unsupported kernel versions - // FIXME: it would be cleaner to not test for specific versions, but rather - // test for specific functionalities. - // Unfortunately we can't test for the feature "does not cause a kernel panic" - // without actually causing a kernel panic, so we need this workaround until - // the circumstances of pre-3.8 crashes are clearer. - // For details see http://github.com/dotcloud/docker/issues/407 - if k, err := utils.GetKernelVersion(); err != nil { - log.Printf("WARNING: %s\n", err) - } else { - if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 { - if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { - log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String()) - } - } - } - - if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) { - return nil, err - } - +// New initializes a new engine. +func New() (*Engine, error) { eng := &Engine{ - root: root, handlers: make(map[string]Handler), id: utils.RandomString(), Stdout: os.Stdout, @@ -113,7 +77,7 @@ func New(root string) (*Engine, error) { } func (eng *Engine) String() string { - return fmt.Sprintf("%s|%s", eng.Root(), eng.id[:8]) + return fmt.Sprintf("%s", eng.id[:8]) } // Commands returns a list of all currently registered commands, diff --git a/engine/engine_test.go b/engine/engine_test.go index a16c352678..63c4660eb1 100644 --- a/engine/engine_test.go +++ b/engine/engine_test.go @@ -2,10 +2,6 @@ package engine import ( "bytes" - "io/ioutil" - "os" - "path" - "path/filepath" "strings" "testing" ) @@ -67,7 +63,6 @@ func TestJob(t *testing.T) { func TestEngineCommands(t *testing.T) { eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) handler := func(job *Job) Status { return StatusOK } eng.Register("foo", handler) eng.Register("bar", handler) @@ -83,44 +78,9 @@ func TestEngineCommands(t *testing.T) { } } -func TestEngineRoot(t *testing.T) { - tmp, err := ioutil.TempDir("", "docker-test-TestEngineCreateDir") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - // We expect Root to resolve to an absolute path. - // FIXME: this should not be necessary. - // Until the above FIXME is implemented, let's check for the - // current behavior. 
- tmp, err = filepath.EvalSymlinks(tmp) - if err != nil { - t.Fatal(err) - } - tmp, err = filepath.Abs(tmp) - if err != nil { - t.Fatal(err) - } - dir := path.Join(tmp, "dir") - eng, err := New(dir) - if err != nil { - t.Fatal(err) - } - if st, err := os.Stat(dir); err != nil { - t.Fatal(err) - } else if !st.IsDir() { - t.Fatalf("engine.New() created something other than a directory at %s", dir) - } - if r := eng.Root(); r != dir { - t.Fatalf("Expected: %v\nReceived: %v", dir, r) - } -} - func TestEngineString(t *testing.T) { eng1 := newTestEngine(t) - defer os.RemoveAll(eng1.Root()) eng2 := newTestEngine(t) - defer os.RemoveAll(eng2.Root()) s1 := eng1.String() s2 := eng2.String() if eng1 == eng2 { @@ -130,7 +90,6 @@ func TestEngineString(t *testing.T) { func TestEngineLogf(t *testing.T) { eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) input := "Test log line" if n, err := eng.Logf("%s\n", input); err != nil { t.Fatal(err) @@ -141,7 +100,6 @@ func TestEngineLogf(t *testing.T) { func TestParseJob(t *testing.T) { eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) // Verify that the resulting job calls to the right place var called bool eng.Register("echo", func(job *Job) Status { diff --git a/engine/helpers_test.go b/engine/helpers_test.go index 488529fc7f..a8d3dfc4d4 100644 --- a/engine/helpers_test.go +++ b/engine/helpers_test.go @@ -1,18 +1,13 @@ package engine import ( - "github.com/dotcloud/docker/utils" "testing" ) var globalTestID string func newTestEngine(t *testing.T) *Engine { - tmp, err := utils.TestDirectory("") - if err != nil { - t.Fatal(err) - } - eng, err := New(tmp) + eng, err := New() if err != nil { t.Fatal(err) } diff --git a/engine/job_test.go b/engine/job_test.go index 50d882c44b..ace398f934 100644 --- a/engine/job_test.go +++ b/engine/job_test.go @@ -1,13 +1,11 @@ package engine import ( - "os" "testing" ) func TestJobStatusOK(t *testing.T) { eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) eng.Register("return_ok", func(job *Job) Status { return StatusOK }) err := eng.Job("return_ok").Run() if err != nil { @@ -17,7 +15,6 @@ func TestJobStatusOK(t *testing.T) { func TestJobStatusErr(t *testing.T) { eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) eng.Register("return_err", func(job *Job) Status { return StatusErr }) err := eng.Job("return_err").Run() if err == nil { @@ -27,7 +24,6 @@ func TestJobStatusErr(t *testing.T) { func TestJobStatusNotFound(t *testing.T) { eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound }) err := eng.Job("return_not_found").Run() if err == nil { @@ -37,7 +33,6 @@ func TestJobStatusNotFound(t *testing.T) { func TestJobStdoutString(t *testing.T) { eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) // FIXME: test multiple combinations of output and status eng.Register("say_something_in_stdout", func(job *Job) Status { job.Printf("Hello world\n") @@ -59,7 +54,6 @@ func TestJobStdoutString(t *testing.T) { func TestJobStderrString(t *testing.T) { eng := newTestEngine(t) - defer os.RemoveAll(eng.Root()) // FIXME: test multiple combinations of output and status eng.Register("say_something_in_stderr", func(job *Job) Status { job.Errorf("Warning, something might happen\nHere it comes!\nOh no...\nSomething happened\n") diff --git a/integration/runtime_test.go b/integration/runtime_test.go index 38b3277afd..497d1c51c8 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -627,10 +627,9 @@ func TestRestore(t 
*testing.T) { // Here are are simulating a docker restart - that is, reloading all containers // from scratch - eng = newTestEngine(t, false, eng.Root()) - daemon2 := mkDaemonFromEngine(eng, t) - if len(daemon2.List()) != 2 { - t.Errorf("Expected 2 container, %v found", len(daemon2.List())) + eng = newTestEngine(t, false, runtime.Config().Root) + if len(runtime2.List()) != 2 { + t.Errorf("Expected 2 container, %v found", len(runtime2.List())) } runningCount := 0 for _, c := range daemon2.List() { diff --git a/integration/server_test.go b/integration/server_test.go index cb3063ded7..226247556d 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -149,7 +149,7 @@ func TestRestartKillWait(t *testing.T) { t.Fatal(err) } - eng = newTestEngine(t, false, eng.Root()) + eng = newTestEngine(t, false, runtime.Config().Root) srv = mkServerFromEngine(eng, t) job = srv.Eng.Job("containers") diff --git a/integration/utils_test.go b/integration/utils_test.go index ab9ca5b72d..6e2b8abc9e 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -181,7 +181,7 @@ func newTestEngine(t utils.Fataler, autorestart bool, root string) *engine.Engin root = dir } } - eng, err := engine.New(root) + eng, err := engine.New() if err != nil { t.Fatal(err) } From 87e8d7754e218d93b480b94537d07793f6680515 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 22 Apr 2014 19:24:47 -0700 Subject: [PATCH 009/219] Update tests with engine root removal Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- api/server/server_unit_test.go | 14 +------------- integration/runtime_test.go | 7 ++++--- integration/utils_test.go | 1 + 3 files changed, 6 insertions(+), 16 deletions(-) diff --git a/api/server/server_unit_test.go b/api/server/server_unit_test.go index 561f47d343..2dcd0df790 100644 --- a/api/server/server_unit_test.go +++ b/api/server/server_unit_test.go @@ -6,11 +6,9 @@ import ( "fmt" "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/utils" "io" "net/http" "net/http/httptest" - "os" "testing" ) @@ -60,7 +58,6 @@ func TesthttpError(t *testing.T) { func TestGetVersion(t *testing.T) { eng := tmpEngine(t) - defer rmEngine(eng) var called bool eng.Register("version", func(job *engine.Job) engine.Status { called = true @@ -90,7 +87,6 @@ func TestGetVersion(t *testing.T) { func TestGetInfo(t *testing.T) { eng := tmpEngine(t) - defer rmEngine(eng) var called bool eng.Register("info", func(job *engine.Job) engine.Status { called = true @@ -131,21 +127,13 @@ func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t * } func tmpEngine(t *testing.T) *engine.Engine { - tmp, err := utils.TestDirectory("") - if err != nil { - t.Fatal(err) - } - eng, err := engine.New(tmp) + eng, err := engine.New() if err != nil { t.Fatal(err) } return eng } -func rmEngine(eng *engine.Engine) { - os.RemoveAll(eng.Root()) -} - func readEnv(src io.Reader, t *testing.T) *engine.Env { out := engine.NewOutput() v, err := out.AddEnv() diff --git a/integration/runtime_test.go b/integration/runtime_test.go index 497d1c51c8..bf00437547 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -627,9 +627,10 @@ func TestRestore(t *testing.T) { // Here are are simulating a docker restart - that is, reloading all containers // from scratch - eng = newTestEngine(t, false, runtime.Config().Root) - if len(runtime2.List()) != 2 { - t.Errorf("Expected 2 container, %v found", len(runtime2.List())) + eng = newTestEngine(t, 
false, daemon1.Config().Root) + daemon2 := mkDaemonFromEngine(eng, t) + if len(daemon2.List()) != 2 { + t.Errorf("Expected 2 container, %v found", len(daemon2.List())) } runningCount := 0 for _, c := range daemon2.List() { diff --git a/integration/utils_test.go b/integration/utils_test.go index 6e2b8abc9e..f455657705 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -181,6 +181,7 @@ func newTestEngine(t utils.Fataler, autorestart bool, root string) *engine.Engin root = dir } } + os.MkdirAll(root, 0700) eng, err := engine.New() if err != nil { t.Fatal(err) From a5b4a907485c76088d02226df62b0a27bd76117f Mon Sep 17 00:00:00 2001 From: William Henry Date: Tue, 22 Apr 2014 22:17:33 -0600 Subject: [PATCH 010/219] Fixed an missing '-' on the "--rm" Docker-DCO-1.1-Signed-off-by: William Henry (github: ipbabble) Changes to be committed: modified: contrib/man/md/docker-build.md --- contrib/man/md/docker-build.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/man/md/docker-build.md b/contrib/man/md/docker-build.md index 679dc577c4..c61ee63940 100644 --- a/contrib/man/md/docker-build.md +++ b/contrib/man/md/docker-build.md @@ -6,7 +6,7 @@ docker-build - Build a container image from a Dockerfile source at PATH # SYNOPSIS **docker build** [**--no-cache**[=*false*] [**-q**|**--quiet**[=*false*] - [**-rm**] [**-t**|**--tag**=*tag*] PATH | URL | - + [**--rm**] [**-t**|**--tag**=*tag*] PATH | URL | - # DESCRIPTION This will read the Dockerfile from the directory specified in **PATH**. From a6fd2c237ecca330554ec8bdaf7b2acfe68de1ce Mon Sep 17 00:00:00 2001 From: William Henry Date: Tue, 22 Apr 2014 22:35:59 -0600 Subject: [PATCH 011/219] Fixed some missing ']' and the TAG Docker-DCO-1.1-Signed-off-by: William Henry (github: ipbabble) Changes to be committed: modified: contrib/man/md/docker-build.1.md --- contrib/man/md/docker-build.1.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/man/md/docker-build.1.md b/contrib/man/md/docker-build.1.md index c61ee63940..b3e9a2842e 100644 --- a/contrib/man/md/docker-build.1.md +++ b/contrib/man/md/docker-build.1.md @@ -5,8 +5,8 @@ docker-build - Build a container image from a Dockerfile source at PATH # SYNOPSIS -**docker build** [**--no-cache**[=*false*] [**-q**|**--quiet**[=*false*] - [**--rm**] [**-t**|**--tag**=*tag*] PATH | URL | - +**docker build** [**--no-cache**[=*false*]] [**-q**|**--quiet**[=*false*]] + [**--rm**] [**-t**|**--tag**=TAG] PATH | URL | - # DESCRIPTION This will read the Dockerfile from the directory specified in **PATH**. From 086b3208ea0f152f5c9501805fd0d8d1c95c7aad Mon Sep 17 00:00:00 2001 From: Roland Moriz Date: Wed, 23 Apr 2014 09:17:57 +0200 Subject: [PATCH 012/219] fixed broken link to chef community site --- docs/sources/use/chef.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/use/chef.md b/docs/sources/use/chef.md index 87e3215ced..b35391dca5 100644 --- a/docs/sources/use/chef.md +++ b/docs/sources/use/chef.md @@ -19,7 +19,7 @@ operating systems. ## Installation The cookbook is available on the [Chef Community -Site](community.opscode.com/cookbooks/docker) and can be installed using +Site](http://community.opscode.com/cookbooks/docker) and can be installed using your favorite cookbook dependency manager. 
The source can be found on From 9d38fd0eefb2df0bf087a67fad2ad852f28e2e96 Mon Sep 17 00:00:00 2001 From: "O.S.Tezer" Date: Fri, 18 Apr 2014 00:20:18 +0300 Subject: [PATCH 013/219] Development docs warning for beta-docs.docker.io Update: Fix alignment issues of the "warning" block. Docker-DCO-1.1-Signed-off-by: O.S. Tezer (github: ostezer) Docker-DCO-1.1-Signed-off-by: O.S.Tezer (github: SvenDowideit) --- docs/theme/mkdocs/base.html | 1 + docs/theme/mkdocs/beta_warning.html | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 docs/theme/mkdocs/beta_warning.html diff --git a/docs/theme/mkdocs/base.html b/docs/theme/mkdocs/base.html index 766afc0c8a..6f396e044a 100644 --- a/docs/theme/mkdocs/base.html +++ b/docs/theme/mkdocs/base.html @@ -48,6 +48,7 @@
{% include "breadcrumbs.html" %}
+ {% include "beta_warning.html" %}
 {{ content }}
diff --git a/docs/theme/mkdocs/beta_warning.html b/docs/theme/mkdocs/beta_warning.html new file mode 100644 index 0000000000..943633e250 --- /dev/null +++ b/docs/theme/mkdocs/beta_warning.html @@ -0,0 +1,27 @@ + +
+

You are looking at the beta docs for the development version of Docker.

+There is a chance of them being different from the prior versions.
+
From dc982d3053bf46e03ad27c29d4662a9d5ba5e819 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Wed, 23 Apr 2014 16:28:06 +1000 Subject: [PATCH 014/219] make the non-release doc warning conditional and add version info use the beta-warning area to tell the user what VERSION of docker, git branch, and links to the official release version docs are. requires / extends PR #5272 Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- Makefile | 3 +++ docs/Dockerfile | 5 +++++ docs/theme/mkdocs/base.html | 2 +- docs/theme/mkdocs/beta_warning.html | 9 +++++++-- 4 files changed, 16 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index d358678223..e020c14eac 100644 --- a/Makefile +++ b/Makefile @@ -53,6 +53,9 @@ build: bundles docker build -t "$(DOCKER_IMAGE)" . docs-build: + cp ./VERSION docs/VERSION + echo "$(GIT_BRANCH)" > docs/GIT_BRANCH + echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET docker build -t "$(DOCKER_DOCS_IMAGE)" docs bundles: diff --git a/docs/Dockerfile b/docs/Dockerfile index d832dcb798..9b968ca847 100644 --- a/docs/Dockerfile +++ b/docs/Dockerfile @@ -39,6 +39,11 @@ WORKDIR /docs #convert to markdown #RUN ./convert.sh +RUN VERSION=$(cat /docs/VERSION) &&\ + GIT_BRANCH=$(cat /docs/GIT_BRANCH) &&\ + AWS_S3_BUCKET=$(cat /docs/AWS_S3_BUCKET) &&\ + echo "{% set docker_version = \"${VERSION}\" %}{% set docker_branch = \"${GIT_BRANCH}\" %}{% set aws_bucket = \"${AWS_S3_BUCKET}\" %}{% include \"beta_warning.html\" %}" > /docs/theme/mkdocs/version.html + # note, EXPOSE is only last because of https://github.com/dotcloud/docker/issues/3525 EXPOSE 8000 diff --git a/docs/theme/mkdocs/base.html b/docs/theme/mkdocs/base.html index 6f396e044a..5371569596 100644 --- a/docs/theme/mkdocs/base.html +++ b/docs/theme/mkdocs/base.html @@ -48,7 +48,7 @@
{% include "breadcrumbs.html" %}
- {% include "beta_warning.html" %}
+ {% include "version.html" %}
 {{ content }}
diff --git a/docs/theme/mkdocs/beta_warning.html b/docs/theme/mkdocs/beta_warning.html index 943633e250..b7ffd28a9a 100644 --- a/docs/theme/mkdocs/beta_warning.html +++ b/docs/theme/mkdocs/beta_warning.html @@ -1,3 +1,4 @@ +{% if aws_bucket != "docs.docker.io" %}
-

You are looking at the beta docs for the development version of Docker.

-There is a chance of them being different from the prior versions.
+

This is the + {% if docker_version != docker_version|replace("-dev", "bingo") %}{{ docker_branch }} development branch{% else %}beta{% endif %} + documentation for Docker version {{ docker_version }}.

+ Please go to http://docs.docker.io for the current Docker release documentation.
+ {{ aws_bucket }}
+{% endif %} From 73d9ede12c9328c44e38699dbe3a04479d3926e6 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Wed, 23 Apr 2014 13:50:53 +0200 Subject: [PATCH 015/219] devicemapper: Don't mount in Create() We used to mount in Create() to be able to create a few files that needs to be in each device. However, this mount is problematic for selinux, as we need to set the mount label at mount-time, and it is not known at the time of Create(). This change just moves the file creation to first Get() call and drops the mount from Create(). Additionally, this lets us remove some complexities we had to avoid an extra unmount+mount cycle. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- daemon/graphdriver/devmapper/deviceset.go | 44 +------------- daemon/graphdriver/devmapper/driver.go | 64 +++++++++------------ daemon/graphdriver/devmapper/driver_test.go | 11 ---- 3 files changed, 30 insertions(+), 89 deletions(-) diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 97d670a3d9..640bebd32b 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -35,12 +35,6 @@ type DevInfo struct { mountCount int `json:"-"` mountPath string `json:"-"` - // A floating mount means one reference is not owned and - // will be stolen by the next mount. This allows us to - // avoid unmounting directly after creation before the - // first get (since we need to mount to set up the device - // a bit first). - floating bool `json:"-"` // The global DeviceSet lock guarantees that we serialize all // the calls to libdevmapper (which is not threadsafe), but we @@ -94,14 +88,6 @@ type DevStatus struct { HighestMappedSector uint64 } -type UnmountMode int - -const ( - UnmountRegular UnmountMode = iota - UnmountFloat - UnmountSink -) - func getDevName(name string) string { return "/dev/mapper/" + name } @@ -876,12 +862,7 @@ func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) erro return fmt.Errorf("Trying to mount devmapper device in multple places (%s, %s)", info.mountPath, path) } - if info.floating { - // Steal floating ref - info.floating = false - } else { - info.mountCount++ - } + info.mountCount++ return nil } @@ -903,13 +884,12 @@ func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) erro info.mountCount = 1 info.mountPath = path - info.floating = false return devices.setInitialized(info) } -func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error { - utils.Debugf("[devmapper] UnmountDevice(hash=%s, mode=%d)", hash, mode) +func (devices *DeviceSet) UnmountDevice(hash string) error { + utils.Debugf("[devmapper] UnmountDevice(hash=%s)", hash) defer utils.Debugf("[devmapper] UnmountDevice END") info, err := devices.lookupDevice(hash) @@ -923,24 +903,6 @@ func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error { devices.Lock() defer devices.Unlock() - if mode == UnmountFloat { - if info.floating { - return fmt.Errorf("UnmountDevice: can't float floating reference %s\n", hash) - } - - // Leave this reference floating - info.floating = true - return nil - } - - if mode == UnmountSink { - if !info.floating { - // Someone already sunk this - return nil - } - // Otherwise, treat this as a regular unmount - } - if info.mountCount == 0 { return fmt.Errorf("UnmountDevice: device not-mounted id %s\n", hash) } diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go index 
e958ef3e59..66c4cb0767 100644 --- a/daemon/graphdriver/devmapper/driver.go +++ b/daemon/graphdriver/devmapper/driver.go @@ -64,26 +64,6 @@ func (d *Driver) Create(id, parent string, mountLabel string) error { if err := d.DeviceSet.AddDevice(id, parent); err != nil { return err } - mp := path.Join(d.home, "mnt", id) - if err := d.mount(id, mp); err != nil { - return err - } - - if err := osMkdirAll(path.Join(mp, "rootfs"), 0755); err != nil && !osIsExist(err) { - return err - } - - // Create an "id" file with the container/image id in it to help reconscruct this in case - // of later problems - if err := ioutil.WriteFile(path.Join(mp, "id"), []byte(id), 0600); err != nil { - return err - } - - // We float this reference so that the next Get call can - // steal it, so we don't have to unmount - if err := d.DeviceSet.UnmountDevice(id, UnmountFloat); err != nil { - return err - } return nil } @@ -96,10 +76,6 @@ func (d *Driver) Remove(id string) error { return nil } - // Sink the float from create in case no Get() call was made - if err := d.DeviceSet.UnmountDevice(id, UnmountSink); err != nil { - return err - } // This assumes the device has been properly Get/Put:ed and thus is unmounted if err := d.DeviceSet.DeleteDevice(id); err != nil { return err @@ -115,28 +91,42 @@ func (d *Driver) Remove(id string) error { func (d *Driver) Get(id string) (string, error) { mp := path.Join(d.home, "mnt", id) - if err := d.mount(id, mp); err != nil { + + // Create the target directories if they don't exist + if err := osMkdirAll(mp, 0755); err != nil && !osIsExist(err) { return "", err } - return path.Join(mp, "rootfs"), nil + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, ""); err != nil { + return "", err + } + + rootFs := path.Join(mp, "rootfs") + if err := osMkdirAll(rootFs, 0755); err != nil && !osIsExist(err) { + d.DeviceSet.UnmountDevice(id) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := osStat(idFile); err != nil && osIsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconscruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.DeviceSet.UnmountDevice(id) + return "", err + } + } + + return rootFs, nil } func (d *Driver) Put(id string) { - if err := d.DeviceSet.UnmountDevice(id, UnmountRegular); err != nil { + if err := d.DeviceSet.UnmountDevice(id); err != nil { utils.Errorf("Warning: error unmounting device %s: %s\n", id, err) } } -func (d *Driver) mount(id, mountPoint string) error { - // Create the target directories if they don't exist - if err := osMkdirAll(mountPoint, 0755); err != nil && !osIsExist(err) { - return err - } - // Mount the device - return d.DeviceSet.MountDevice(id, mountPoint, "") -} - func (d *Driver) Exists(id string) bool { - return d.Devices[id] != nil + return d.DeviceSet.HasDevice(id) } diff --git a/daemon/graphdriver/devmapper/driver_test.go b/daemon/graphdriver/devmapper/driver_test.go index d431f942aa..77e8a6013a 100644 --- a/daemon/graphdriver/devmapper/driver_test.go +++ b/daemon/graphdriver/devmapper/driver_test.go @@ -500,15 +500,10 @@ func TestDriverCreate(t *testing.T) { calls.Assert(t, "DmTaskCreate", "DmTaskGetInfo", - "sysMount", "DmTaskRun", - "DmTaskSetTarget", "DmTaskSetSector", - "DmTaskSetCookie", - "DmUdevWait", "DmTaskSetName", "DmTaskSetMessage", - "DmTaskSetAddNode", ) }() @@ -619,15 +614,10 @@ func TestDriverRemove(t *testing.T) { calls.Assert(t, "DmTaskCreate", "DmTaskGetInfo", - "sysMount", "DmTaskRun", - 
"DmTaskSetTarget", "DmTaskSetSector", - "DmTaskSetCookie", - "DmUdevWait", "DmTaskSetName", "DmTaskSetMessage", - "DmTaskSetAddNode", ) Mounted = func(mnt string) (bool, error) { @@ -650,7 +640,6 @@ func TestDriverRemove(t *testing.T) { "DmTaskSetTarget", "DmTaskSetAddNode", "DmUdevWait", - "sysUnmount", ) }() runtime.GC() From b3ddc31b9581665eb15dedd0aa45bd37c1eb6815 Mon Sep 17 00:00:00 2001 From: Daniel Norberg Date: Tue, 22 Apr 2014 16:56:18 -0400 Subject: [PATCH 016/219] avoid suicide container.Kill() might read a pid of 0 from container.State.Pid due to losing a race with container.monitor() calling container.State.SetStopped(). Sending a SIGKILL to pid 0 is undesirable as "If pid equals 0, then sig is sent to every process in the process group of the calling process." Docker-DCO-1.1-Signed-off-by: Daniel Norberg (github: danielnorberg) --- daemon/container.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index 6f63d565f2..c06fd2c074 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -654,9 +654,12 @@ func (container *Container) Kill() error { // 2. Wait for the process to die, in last resort, try to kill the process directly if err := container.WaitTimeout(10 * time.Second); err != nil { - log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID)) - if err := syscall.Kill(container.State.Pid, 9); err != nil { - return err + // Ensure that we don't kill ourselves + if pid := container.State.Pid; pid != 0 { + log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID)) + if err := syscall.Kill(pid, 9); err != nil { + return err + } } } From 7100ace42bda2660d1eaecb2ec096ba6753688ea Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 23 Apr 2014 11:54:35 -0700 Subject: [PATCH 017/219] Remove error from engine.New() Without creating a root there is no way for the engine to return an error from the new function. 
Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- api/server/server_unit_test.go | 12 ++---------- docker/docker.go | 7 ++----- engine/engine.go | 4 ++-- engine/engine_test.go | 14 +++++++------- engine/helpers_test.go | 10 +--------- engine/job_test.go | 10 +++++----- integration/utils_test.go | 6 ++---- 7 files changed, 21 insertions(+), 42 deletions(-) diff --git a/api/server/server_unit_test.go b/api/server/server_unit_test.go index 2dcd0df790..8ab34127ac 100644 --- a/api/server/server_unit_test.go +++ b/api/server/server_unit_test.go @@ -57,7 +57,7 @@ func TesthttpError(t *testing.T) { } func TestGetVersion(t *testing.T) { - eng := tmpEngine(t) + eng := engine.New() var called bool eng.Register("version", func(job *engine.Job) engine.Status { called = true @@ -86,7 +86,7 @@ func TestGetVersion(t *testing.T) { } func TestGetInfo(t *testing.T) { - eng := tmpEngine(t) + eng := engine.New() var called bool eng.Register("info", func(job *engine.Job) engine.Status { called = true @@ -126,14 +126,6 @@ func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t * return r } -func tmpEngine(t *testing.T) *engine.Engine { - eng, err := engine.New() - if err != nil { - t.Fatal(err) - } - return eng -} - func readEnv(src io.Reader, t *testing.T) *engine.Env { out := engine.NewOutput() v, err := out.AddEnv() diff --git a/docker/docker.go b/docker/docker.go index aed3b0778d..7a4ddc72a5 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -121,14 +121,11 @@ func main() { log.Fatalf("Unable to get the full path to root (%s): %s", root, err) } } - if err := checkKernelAndArch(); err != nil { log.Fatal(err) } - eng, err := engine.New() - if err != nil { - log.Fatal(err) - } + + eng := engine.New() // Load builtins builtins.Register(eng) // load the daemon in the background so we can immediately start diff --git a/engine/engine.go b/engine/engine.go index 9cc1bd5004..58c37ab933 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -54,7 +54,7 @@ func (eng *Engine) Register(name string, handler Handler) error { } // New initializes a new engine. 
-func New() (*Engine, error) { +func New() *Engine { eng := &Engine{ handlers: make(map[string]Handler), id: utils.RandomString(), @@ -73,7 +73,7 @@ func New() (*Engine, error) { for k, v := range globalHandlers { eng.handlers[k] = v } - return eng, nil + return eng } func (eng *Engine) String() string { diff --git a/engine/engine_test.go b/engine/engine_test.go index 63c4660eb1..8023bd58f3 100644 --- a/engine/engine_test.go +++ b/engine/engine_test.go @@ -17,7 +17,7 @@ func TestRegister(t *testing.T) { // Register is global so let's cleanup to avoid conflicts defer unregister("dummy1") - eng := newTestEngine(t) + eng := New() //Should fail because global handlers are copied //at the engine creation @@ -36,7 +36,7 @@ func TestRegister(t *testing.T) { } func TestJob(t *testing.T) { - eng := newTestEngine(t) + eng := New() job1 := eng.Job("dummy1", "--level=awesome") if job1.handler != nil { @@ -62,7 +62,7 @@ func TestJob(t *testing.T) { } func TestEngineCommands(t *testing.T) { - eng := newTestEngine(t) + eng := New() handler := func(job *Job) Status { return StatusOK } eng.Register("foo", handler) eng.Register("bar", handler) @@ -79,8 +79,8 @@ func TestEngineCommands(t *testing.T) { } func TestEngineString(t *testing.T) { - eng1 := newTestEngine(t) - eng2 := newTestEngine(t) + eng1 := New() + eng2 := New() s1 := eng1.String() s2 := eng2.String() if eng1 == eng2 { @@ -89,7 +89,7 @@ func TestEngineString(t *testing.T) { } func TestEngineLogf(t *testing.T) { - eng := newTestEngine(t) + eng := New() input := "Test log line" if n, err := eng.Logf("%s\n", input); err != nil { t.Fatal(err) @@ -99,7 +99,7 @@ func TestEngineLogf(t *testing.T) { } func TestParseJob(t *testing.T) { - eng := newTestEngine(t) + eng := New() // Verify that the resulting job calls to the right place var called bool eng.Register("echo", func(job *Job) Status { diff --git a/engine/helpers_test.go b/engine/helpers_test.go index a8d3dfc4d4..cfa11da7cd 100644 --- a/engine/helpers_test.go +++ b/engine/helpers_test.go @@ -6,14 +6,6 @@ import ( var globalTestID string -func newTestEngine(t *testing.T) *Engine { - eng, err := New() - if err != nil { - t.Fatal(err) - } - return eng -} - func mkJob(t *testing.T, name string, args ...string) *Job { - return newTestEngine(t).Job(name, args...) + return New().Job(name, args...) 
} diff --git a/engine/job_test.go b/engine/job_test.go index ace398f934..1f927cbafc 100644 --- a/engine/job_test.go +++ b/engine/job_test.go @@ -5,7 +5,7 @@ import ( ) func TestJobStatusOK(t *testing.T) { - eng := newTestEngine(t) + eng := New() eng.Register("return_ok", func(job *Job) Status { return StatusOK }) err := eng.Job("return_ok").Run() if err != nil { @@ -14,7 +14,7 @@ func TestJobStatusOK(t *testing.T) { } func TestJobStatusErr(t *testing.T) { - eng := newTestEngine(t) + eng := New() eng.Register("return_err", func(job *Job) Status { return StatusErr }) err := eng.Job("return_err").Run() if err == nil { @@ -23,7 +23,7 @@ func TestJobStatusErr(t *testing.T) { } func TestJobStatusNotFound(t *testing.T) { - eng := newTestEngine(t) + eng := New() eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound }) err := eng.Job("return_not_found").Run() if err == nil { @@ -32,7 +32,7 @@ func TestJobStatusNotFound(t *testing.T) { } func TestJobStdoutString(t *testing.T) { - eng := newTestEngine(t) + eng := New() // FIXME: test multiple combinations of output and status eng.Register("say_something_in_stdout", func(job *Job) Status { job.Printf("Hello world\n") @@ -53,7 +53,7 @@ func TestJobStdoutString(t *testing.T) { } func TestJobStderrString(t *testing.T) { - eng := newTestEngine(t) + eng := New() // FIXME: test multiple combinations of output and status eng.Register("say_something_in_stderr", func(job *Job) Status { job.Errorf("Warning, something might happen\nHere it comes!\nOh no...\nSomething happened\n") diff --git a/integration/utils_test.go b/integration/utils_test.go index f455657705..6901662ce6 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -182,10 +182,8 @@ func newTestEngine(t utils.Fataler, autorestart bool, root string) *engine.Engin } } os.MkdirAll(root, 0700) - eng, err := engine.New() - if err != nil { - t.Fatal(err) - } + + eng := engine.New() // Load default plugins builtins.Register(eng) // (This is manually copied and modified from main() until we have a more generic plugin system) From 6e6b8b69cdac5154afa823255252bca5c4890c6e Mon Sep 17 00:00:00 2001 From: Sindhu S Date: Thu, 24 Apr 2014 01:02:54 +0530 Subject: [PATCH 018/219] Update link to interactive tutorial --- docs/sources/introduction/working-with-docker.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/introduction/working-with-docker.md b/docs/sources/introduction/working-with-docker.md index f395723d60..86865428e1 100644 --- a/docs/sources/introduction/working-with-docker.md +++ b/docs/sources/introduction/working-with-docker.md @@ -54,7 +54,7 @@ accessed by running the `docker` binary. > **Tip:** The below instructions can be considered a summary of our > *interactive tutorial*. If you prefer a more hands-on approach without > installing anything, why not give that a shot and check out the -> [Docker Interactive Tutorial](http://www.docker.io/interactivetutorial). +> [Docker Interactive Tutorial](https://www.docker.io/gettingstarted/#h_tutorial). 
The `docker` client usage consists of passing a chain of arguments: From 81477a204f910fb5ce518a62c64047a0aa9ce017 Mon Sep 17 00:00:00 2001 From: Sindhu S Date: Thu, 24 Apr 2014 01:59:52 +0530 Subject: [PATCH 019/219] Update link to interactive tutorial --- docs/sources/introduction/working-with-docker.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/introduction/working-with-docker.md b/docs/sources/introduction/working-with-docker.md index 86865428e1..637030acbc 100644 --- a/docs/sources/introduction/working-with-docker.md +++ b/docs/sources/introduction/working-with-docker.md @@ -54,7 +54,7 @@ accessed by running the `docker` binary. > **Tip:** The below instructions can be considered a summary of our > *interactive tutorial*. If you prefer a more hands-on approach without > installing anything, why not give that a shot and check out the -> [Docker Interactive Tutorial](https://www.docker.io/gettingstarted/#h_tutorial). +> [Docker Interactive Tutorial](https://www.docker.io/gettingstarted). The `docker` client usage consists of passing a chain of arguments: From f90029611fec082a41b5629e43a88a39f0674fe2 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 23 Apr 2014 13:47:56 -0700 Subject: [PATCH 020/219] Add exported status code from a job This allows the job's status code to be consumed externally so that we can use it as an exit code or saving to a state file. Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- engine/job.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/engine/job.go b/engine/job.go index 50d64011f9..b56155ac1c 100644 --- a/engine/job.go +++ b/engine/job.go @@ -208,3 +208,7 @@ func (job *Job) Error(err error) Status { fmt.Fprintf(job.Stderr, "%s\n", err) return StatusErr } + +func (job *Job) StatusCode() int { + return int(job.status) +} From e62efb266fd878453b8562e60b06a3e579fe571a Mon Sep 17 00:00:00 2001 From: Mike MacCana Date: Fri, 14 Feb 2014 12:16:26 +0000 Subject: [PATCH 021/219] - unix://path/to/socket should read unix:///path/to/socket like the rest of the documentation (a slash was missing) - Mention that [] options may be specified multiple times on the Usage page Docker-DCO-1.1-Signed-off-by: Mike MacCana (github: mikemaccana) Docker-DCO-1.1-Signed-off-by: Mike MacCana (github: SvenDowideit) --- docker/docker.go | 2 +- docs/sources/reference/commandline/cli.rst | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/docker/docker.go b/docker/docker.go index 7a4ddc72a5..d10202398b 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -67,7 +67,7 @@ func main() { ) flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers") flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") - flag.Var(&flHosts, []string{"H", "-host"}, "tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified") + flag.Var(&flHosts, []string{"H", "-host"}, "The socket to bind to in daemon mode, specified using tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.") flag.Parse() diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 87c08eb4b4..469434413a 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -63,10 +63,10 @@ only be specified once. 
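The `StatusCode()` accessor added above makes a job's numeric status available to callers, for example to reuse as a process exit code or to write to a state file, as the commit message suggests. A small hedged sketch in the same vein as the previous one (the `return_err` handler mirrors the test above; the `main` wrapper and import path are assumptions):

    package main

    import (
        "os"

        "github.com/dotcloud/docker/engine"
    )

    func main() {
        eng := engine.New()
        eng.Register("return_err", func(job *engine.Job) engine.Status {
            job.Errorf("something went wrong\n")
            return engine.StatusErr
        })

        job := eng.Job("return_err")
        job.Run() // the error return still signals failure; the raw status is read below

        // StatusCode (added in the patch above) exposes the numeric status,
        // which can be propagated directly as the process exit code.
        os.Exit(job.StatusCode())
    }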
:: - Usage of docker: + Usage: -D, --debug=false: Enable debug mode - -H, --host=[]: Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise. systemd socket activation can be used with fd://[socketfd]. -G, --group="docker": Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group + -H, --host=[]: The socket to bind to in daemon mode, specified using tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. --api-enable-cors=false: Enable CORS headers in the remote API -b, --bridge="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking -bip="": Use this CIDR notation address for the network bridge's IP, not compatible with -b @@ -90,6 +90,8 @@ only be specified once. --tlsverify=false: Use TLS and verify the remote (daemon: verify client, client: verify daemon) --mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available + Options with [] may be specified multiple times. + The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the daemon and client. To run the daemon you provide the ``-d`` flag. From b2c87fe08b71523eb20fed63e5da3bef30b756ec Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 24 Apr 2014 11:09:27 +1000 Subject: [PATCH 022/219] add a reference to multiple -H options, and update the other example of -H option and copy changes to the cli.md file Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docker/docker.go | 2 +- docs/sources/reference/commandline/cli.md | 6 ++++-- docs/sources/reference/commandline/cli.rst | 6 +++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/docker/docker.go b/docker/docker.go index d10202398b..4d90ab8b2e 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -67,7 +67,7 @@ func main() { ) flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers") flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") - flag.Var(&flHosts, []string{"H", "-host"}, "The socket to bind to in daemon mode, specified using tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.") + flag.Var(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.") flag.Parse() diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index e0d896755b..efdf874e84 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -9,7 +9,7 @@ no parameters or execute `docker help`: $ sudo docker Usage: docker [OPTIONS] COMMAND [arg...] - -H=[unix:///var/run/docker.sock]: tcp://[host]:port to bind/connect to or unix://[/path/to/socket] to use. When host=[127.0.0.1] is omitted for tcp or path=[/var/run/docker.sock] is omitted for unix sockets, default values are used. + -H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. A self-sufficient runtime for linux containers. @@ -53,7 +53,7 @@ expect an integer, and they can only be specified once. 
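The `-H, --host=[]` lines above work because the flag accumulates every occurrence, which is what the note about `[]` options being specifiable multiple times refers to. Docker uses its own flag handling for this; the sketch below only illustrates the accumulate-on-repeat pattern with the standard library `flag` package, so the type name and flag registration are assumptions:

    package main

    import (
        "flag"
        "fmt"
    )

    // hostList collects every occurrence of the flag, which is how options
    // documented with [] can be given more than once.
    type hostList []string

    func (h *hostList) String() string { return fmt.Sprint(*h) }

    func (h *hostList) Set(value string) error {
        *h = append(*h, value)
        return nil
    }

    func main() {
        var hosts hostList
        flag.Var(&hosts, "H", "socket(s) to bind to, e.g. tcp://host:port or unix:///path/to/socket")
        flag.Parse()
        fmt.Println("binding to:", hosts)
    }

Invoked as `prog -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock`, both sockets end up in the slice.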
Usage of docker: -D, --debug=false: Enable debug mode - -H, --host=[]: Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise. systemd socket activation can be used with fd://[socketfd]. + -H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. -G, --group="docker": Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group --api-enable-cors=false: Enable CORS headers in the remote API -b, --bridge="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking @@ -78,6 +78,8 @@ expect an integer, and they can only be specified once. --tlsverify=false: Use TLS and verify the remote (daemon: verify client, client: verify daemon) --mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available + Options with [] may be specified multiple times. + The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the daemon and client. To run the daemon you provide the `-d` flag. diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 469434413a..9c1d3ae4be 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -12,7 +12,7 @@ To list available commands, either run ``docker`` with no parameters or execute $ sudo docker Usage: docker [OPTIONS] COMMAND [arg...] - -H=[unix:///var/run/docker.sock]: tcp://[host]:port to bind/connect to or unix://[/path/to/socket] to use. When host=[127.0.0.1] is omitted for tcp or path=[/var/run/docker.sock] is omitted for unix sockets, default values are used. + -H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. A self-sufficient runtime for linux containers. @@ -63,10 +63,10 @@ only be specified once. :: - Usage: + Usage of docker: -D, --debug=false: Enable debug mode -G, --group="docker": Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group - -H, --host=[]: The socket to bind to in daemon mode, specified using tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. + -H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. --api-enable-cors=false: Enable CORS headers in the remote API -b, --bridge="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking -bip="": Use this CIDR notation address for the network bridge's IP, not compatible with -b From d9f9021e9b8bc46481b4c2bb28786fd3f0b88b9c Mon Sep 17 00:00:00 2001 From: "O.S.Tezer" Date: Thu, 24 Apr 2014 03:31:02 +0300 Subject: [PATCH 023/219] Docs: Add docs new version files created by `make docs` to .gitignore Files are: - docs/AWS_S3_BUCKET - docs/GIT_BRANCH - docs/VERSION Docker-DCO-1.1-Signed-off-by: O.S. 
Tezer (github: ostezer) Update: - Remove unnecessary last blankline --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 0087b47302..4f8f09c775 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,6 @@ bundles/ vendor/pkg/ pyenv Vagrantfile +docs/AWS_S3_BUCKET +docs/GIT_BRANCH +docs/VERSION From c932667cd26f00bb64ddf0c1a7c7de3ac95aa4be Mon Sep 17 00:00:00 2001 From: "O.S.Tezer" Date: Wed, 23 Apr 2014 23:48:28 +0300 Subject: [PATCH 024/219] Docs auto-conversion fixes and MD marking and structure improvements. - Remove redundant chars and all errors caused by RST->MD conversion. e.g. [/#, /\, \<, />, etc.] - Fix broken inter-document links - Fix outbound links no-longer active or changed - Fix lists - Fix code blocks - Correct apostrophes - Replace redundant inline note marks for code with code marks - Fix broken image links - Remove non-functional title links - Correct broken cross-docs links - Improve readability Note: This PR does not try to fix/amend: - Grammatical errors - Lexical errors - Linguistic-logic errors etc. It just aims to fix main structural or conversion errors to serve as a base for further amendments that will cover others including but not limited to those mentioned above. Docker-DCO-1.1-Signed-off-by: O.S. Tezer (github: ostezer) Update: - Fix backtick issues Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/articles.md | 6 +- docs/sources/articles/baseimages.md | 33 +- docs/sources/articles/runmetrics.md | 295 ++++---- docs/sources/articles/security.md | 133 ++-- docs/sources/contributing.md | 4 +- docs/sources/contributing/contributing.md | 20 +- docs/sources/contributing/devenvironment.md | 24 +- docs/sources/examples.md | 26 +- docs/sources/examples/apt-cacher-ng.md | 21 +- .../examples/cfengine_process_management.md | 74 +- docs/sources/examples/couchdb_data_volumes.md | 11 +- docs/sources/examples/hello_world.md | 71 +- docs/sources/examples/https.md | 28 +- docs/sources/examples/mongodb.md | 24 +- docs/sources/examples/nodejs_web_app.md | 63 +- docs/sources/examples/postgresql_service.md | 12 +- docs/sources/examples/python_web_app.md | 28 +- .../sources/examples/running_redis_service.md | 35 +- docs/sources/examples/running_riak_service.md | 26 +- docs/sources/examples/running_ssh_service.md | 8 +- docs/sources/examples/using_supervisord.md | 39 +- docs/sources/faq.md | 285 ++++---- docs/sources/index.md | 2 +- docs/sources/index/accounts.md | 12 +- docs/sources/installation.md | 28 +- docs/sources/installation/amazon.md | 103 ++- docs/sources/installation/archlinux.md | 20 +- docs/sources/installation/binaries.md | 31 +- docs/sources/installation/cruxlinux.md | 20 +- docs/sources/installation/fedora.md | 12 +- docs/sources/installation/frugalware.md | 24 +- docs/sources/installation/gentoolinux.md | 22 +- docs/sources/installation/google.md | 20 +- docs/sources/installation/mac.md | 21 +- docs/sources/installation/openSUSE.md | 15 +- docs/sources/installation/rackspace.md | 6 +- docs/sources/installation/rhel.md | 31 +- docs/sources/installation/softlayer.md | 42 +- docs/sources/installation/ubuntulinux.md | 40 +- docs/sources/installation/windows.md | 51 +- docs/sources/introduction/technology.md | 4 +- .../introduction/working-with-docker.md | 10 +- docs/sources/reference.md | 8 +- docs/sources/reference/api.md | 154 ++-- docs/sources/reference/api/README.md | 11 +- .../api/archive/docker_remote_api_v1.0.md | 237 +++--- .../api/archive/docker_remote_api_v1.1.md | 237 +++--- 
.../api/archive/docker_remote_api_v1.2.md | 242 ++++--- .../api/archive/docker_remote_api_v1.3.md | 260 ++++--- .../api/archive/docker_remote_api_v1.4.md | 300 ++++---- .../api/archive/docker_remote_api_v1.5.md | 267 +++---- .../api/archive/docker_remote_api_v1.6.md | 268 +++---- .../api/archive/docker_remote_api_v1.7.md | 287 ++++---- .../api/archive/docker_remote_api_v1.8.md | 292 ++++---- .../reference/api/docker_io_accounts_api.md | 54 +- .../reference/api/docker_io_oauth_api.md | 86 +-- .../reference/api/docker_remote_api.md | 278 ++++---- .../reference/api/docker_remote_api_v1.10.md | 269 +++---- .../reference/api/docker_remote_api_v1.11.md | 293 ++++---- .../reference/api/docker_remote_api_v1.9.md | 288 ++++---- docs/sources/reference/api/index_api.md | 122 ++-- docs/sources/reference/api/registry_api.md | 164 +++-- .../reference/api/registry_index_spec.md | 613 ++++++++-------- .../api/remote_api_client_libraries.md | 199 ++++-- docs/sources/reference/builder.md | 390 +++++----- docs/sources/reference/commandline.md | 2 +- docs/sources/reference/commandline/cli.md | 675 +++++++++--------- docs/sources/reference/run.md | 301 ++++---- docs/sources/terms.md | 12 +- docs/sources/terms/container.md | 27 +- docs/sources/terms/filesystem.md | 15 +- docs/sources/terms/image.md | 8 +- docs/sources/terms/layer.md | 2 +- docs/sources/terms/registry.md | 10 +- docs/sources/terms/repository.md | 27 +- docs/sources/toctree.md | 16 +- docs/sources/use.md | 18 +- .../sources/use/ambassador_pattern_linking.md | 7 +- docs/sources/use/basics.md | 13 +- docs/sources/use/chef.md | 4 +- docs/sources/use/host_integration.md | 7 +- docs/sources/use/networking.md | 32 +- docs/sources/use/port_redirection.md | 63 +- docs/sources/use/puppet.md | 10 +- docs/sources/use/working_with_links_names.md | 92 ++- docs/sources/use/working_with_volumes.md | 83 +-- docs/sources/use/workingwithrepository.md | 76 +- 87 files changed, 4408 insertions(+), 4191 deletions(-) diff --git a/docs/sources/articles.md b/docs/sources/articles.md index da5a2d255f..54c067d0cc 100644 --- a/docs/sources/articles.md +++ b/docs/sources/articles.md @@ -2,7 +2,7 @@ ## Contents: -- [Docker Security](security/) -- [Create a Base Image](baseimages/) -- [Runtime Metrics](runmetrics/) + - [Docker Security](security/) + - [Create a Base Image](baseimages/) + - [Runtime Metrics](runmetrics/) diff --git a/docs/sources/articles/baseimages.md b/docs/sources/articles/baseimages.md index d2d6336a6c..3754bab6aa 100644 --- a/docs/sources/articles/baseimages.md +++ b/docs/sources/articles/baseimages.md @@ -4,8 +4,8 @@ page_keywords: Examples, Usage, base image, docker, documentation, examples # Create a Base Image -So you want to create your own [*Base -Image*](../../terms/image/#base-image-def)? Great! +So you want to create your own [*Base Image*]( +../../terms/image/#base-image-def)? Great! The specific process will depend heavily on the Linux distribution you want to package. We have some examples below, and you are encouraged to @@ -13,9 +13,9 @@ submit pull requests to contribute new ones. 
## Create a full image using tar -In general, you’ll want to start with a working machine that is running -the distribution you’d like to package as a base image, though that is -not required for some tools like Debian’s +In general, you'll want to start with a working machine that is running +the distribution you'd like to package as a base image, though that is +not required for some tools like Debian's [Debootstrap](https://wiki.debian.org/Debootstrap), which you can also use to build Ubuntu images. @@ -33,19 +33,18 @@ It can be as simple as this to create an Ubuntu base image: There are more example scripts for creating base images in the Docker GitHub Repo: -- [BusyBox](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh) -- CentOS / Scientific Linux CERN (SLC) [on - Debian/Ubuntu](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-rinse.sh) - or [on - CentOS/RHEL/SLC/etc.](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-yum.sh) -- [Debian / - Ubuntu](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh) + - [BusyBox](https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh) + - CentOS / Scientific Linux CERN (SLC) [on Debian/Ubuntu]( + https://github.com/dotcloud/docker/blob/master/contrib/mkimage-rinse.sh) or + [on CentOS/RHEL/SLC/etc.]( + https://github.com/dotcloud/docker/blob/master/contrib/mkimage-yum.sh) + - [Debian / Ubuntu]( + https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh) ## Creating a simple base image using `scratch` -There is a special repository in the Docker registry called -`scratch`, which was created using an empty tar -file: +There is a special repository in the Docker registry called `scratch`, which +was created using an empty tar file: $ tar cv --files-from /dev/null | docker import - scratch @@ -56,5 +55,5 @@ image to base your new minimal containers `FROM`: ADD true-asm /true CMD ["/true"] -The Dockerfile above is from extremely minimal image - -[tianon/true](https://github.com/tianon/dockerfiles/tree/master/true). +The Dockerfile above is from extremely minimal image - [tianon/true]( +https://github.com/tianon/dockerfiles/tree/master/true). diff --git a/docs/sources/articles/runmetrics.md b/docs/sources/articles/runmetrics.md index 30e204c892..4cc210bb52 100644 --- a/docs/sources/articles/runmetrics.md +++ b/docs/sources/articles/runmetrics.md @@ -4,8 +4,8 @@ page_keywords: docker, metrics, CPU, memory, disk, IO, run, runtime # Runtime Metrics -Linux Containers rely on [control -groups](https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt) +Linux Containers rely on [control groups]( +https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt) which not only track groups of processes, but also expose metrics about CPU, memory, and block I/O usage. You can access those metrics and obtain network usage metrics as well. This is relevant for "pure" LXC @@ -14,16 +14,15 @@ containers, as well as for Docker containers. ## Control Groups Control groups are exposed through a pseudo-filesystem. In recent -distros, you should find this filesystem under -`/sys/fs/cgroup`. Under that directory, you will see -multiple sub-directories, called devices, freezer, blkio, etc.; each -sub-directory actually corresponds to a different cgroup hierarchy. +distros, you should find this filesystem under `/sys/fs/cgroup`. 
Under +that directory, you will see multiple sub-directories, called devices, +freezer, blkio, etc.; each sub-directory actually corresponds to a different +cgroup hierarchy. -On older systems, the control groups might be mounted on -`/cgroup`, without distinct hierarchies. In that -case, instead of seeing the sub-directories, you will see a bunch of -files in that directory, and possibly some directories corresponding to -existing containers. +On older systems, the control groups might be mounted on `/cgroup`, without +distinct hierarchies. In that case, instead of seeing the sub-directories, +you will see a bunch of files in that directory, and possibly some directories +corresponding to existing containers. To figure out where your control groups are mounted, you can run: @@ -31,17 +30,14 @@ To figure out where your control groups are mounted, you can run: ## Enumerating Cgroups -You can look into `/proc/cgroups` to see the -different control group subsystems known to the system, the hierarchy -they belong to, and how many groups they contain. +You can look into `/proc/cgroups` to see the different control group subsystems +known to the system, the hierarchy they belong to, and how many groups they contain. -You can also look at `/proc//cgroup` to see -which control groups a process belongs to. The control group will be -shown as a path relative to the root of the hierarchy mountpoint; e.g. -`/` means “this process has not been assigned into a -particular group”, while `/lxc/pumpkin` means that -the process is likely to be a member of a container named -`pumpkin`. +You can also look at `/proc//cgroup` to see which control groups a process +belongs to. The control group will be shown as a path relative to the root of +the hierarchy mountpoint; e.g. `/` means “this process has not been assigned into +a particular group”, while `/lxc/pumpkin` means that the process is likely to be +a member of a container named `pumpkin`. ## Finding the Cgroup for a Given Container @@ -53,12 +49,11 @@ of the LXC tools, the cgroup will be `lxc/.` For Docker containers using cgroups, the container name will be the full ID or long ID of the container. If a container shows up as ae836c95b4c3 in `docker ps`, its long ID might be something like -`ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79`. You can look it up with `docker inspect` -or `docker ps -notrunc`. +`ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79`. You can +look it up with `docker inspect` or `docker ps -notrunc`. Putting everything together to look at the memory metrics for a Docker -container, take a look at -`/sys/fs/cgroup/memory/lxc//`. +container, take a look at `/sys/fs/cgroup/memory/lxc//`. ## Metrics from Cgroups: Memory, CPU, Block IO @@ -106,10 +101,9 @@ Here is what it will look like: total_active_file 4489052160 total_unevictable 32768 -The first half (without the `total_` prefix) -contains statistics relevant to the processes within the cgroup, -excluding sub-cgroups. The second half (with the `total_` -prefix) includes sub-cgroups as well. +The first half (without the `total_` prefix) contains statistics relevant +to the processes within the cgroup, excluding sub-cgroups. The second half +(with the `total_` prefix) includes sub-cgroups as well. Some metrics are "gauges", i.e. values that can increase or decrease (e.g. swap, the amount of swap space used by the members of the cgroup). @@ -118,95 +112,104 @@ they represent occurrences of a specific event (e.g. 
pgfault, which indicates the number of page faults which happened since the creation of the cgroup; this number can never decrease). -cache -: the amount of memory used by the processes of this control group - that can be associated precisely with a block on a block device. - When you read from and write to files on disk, this amount will - increase. This will be the case if you use "conventional" I/O - (`open`, `read`, - `write` syscalls) as well as mapped files (with - `mmap`). It also accounts for the memory used by - `tmpfs` mounts, though the reasons are unclear. -rss -: the amount of memory that *doesn’t* correspond to anything on disk: - stacks, heaps, and anonymous memory maps. -mapped\_file -: indicates the amount of memory mapped by the processes in the - control group. It doesn’t give you information about *how much* - memory is used; it rather tells you *how* it is used. -pgfault and pgmajfault -: indicate the number of times that a process of the cgroup triggered - a "page fault" and a "major fault", respectively. A page fault - happens when a process accesses a part of its virtual memory space - which is nonexistent or protected. The former can happen if the - process is buggy and tries to access an invalid address (it will - then be sent a `SIGSEGV` signal, typically - killing it with the famous `Segmentation fault` - message). The latter can happen when the process reads from a memory - zone which has been swapped out, or which corresponds to a mapped - file: in that case, the kernel will load the page from disk, and let - the CPU complete the memory access. It can also happen when the - process writes to a copy-on-write memory zone: likewise, the kernel - will preempt the process, duplicate the memory page, and resume the - write operation on the process’ own copy of the page. "Major" faults - happen when the kernel actually has to read the data from disk. When - it just has to duplicate an existing page, or allocate an empty - page, it’s a regular (or "minor") fault. -swap -: the amount of swap currently used by the processes in this cgroup. -active\_anon and inactive\_anon -: the amount of *anonymous* memory that has been identified has - respectively *active* and *inactive* by the kernel. "Anonymous" - memory is the memory that is *not* linked to disk pages. In other - words, that’s the equivalent of the rss counter described above. In - fact, the very definition of the rss counter is **active\_anon** + - **inactive\_anon** - **tmpfs** (where tmpfs is the amount of memory - used up by `tmpfs` filesystems mounted by this - control group). Now, what’s the difference between "active" and - "inactive"? Pages are initially "active"; and at regular intervals, - the kernel sweeps over the memory, and tags some pages as - "inactive". Whenever they are accessed again, they are immediately - retagged "active". When the kernel is almost out of memory, and time - comes to swap out to disk, the kernel will swap "inactive" pages. -active\_file and inactive\_file -: cache memory, with *active* and *inactive* similar to the *anon* - memory above. The exact formula is cache = **active\_file** + - **inactive\_file** + **tmpfs**. The exact rules used by the kernel - to move memory pages between active and inactive sets are different - from the ones used for anonymous memory, but the general principle - is the same. 
Note that when the kernel needs to reclaim memory, it - is cheaper to reclaim a clean (=non modified) page from this pool, - since it can be reclaimed immediately (while anonymous pages and - dirty/modified pages have to be written to disk first). -unevictable -: the amount of memory that cannot be reclaimed; generally, it will - account for memory that has been "locked" with `mlock` -. It is often used by crypto frameworks to make sure that - secret keys and other sensitive material never gets swapped out to - disk. -memory and memsw limits -: These are not really metrics, but a reminder of the limits applied - to this cgroup. The first one indicates the maximum amount of - physical memory that can be used by the processes of this control - group; the second one indicates the maximum amount of RAM+swap. + + - **cache:** + the amount of memory used by the processes of this control group + that can be associated precisely with a block on a block device. + When you read from and write to files on disk, this amount will + increase. This will be the case if you use "conventional" I/O + (`open`, `read`, + `write` syscalls) as well as mapped files (with + `mmap`). It also accounts for the memory used by + `tmpfs` mounts, though the reasons are unclear. + + - **rss:** + the amount of memory that *doesn't* correspond to anything on disk: + stacks, heaps, and anonymous memory maps. + + - **mapped_file:** + indicates the amount of memory mapped by the processes in the + control group. It doesn't give you information about *how much* + memory is used; it rather tells you *how* it is used. + + - **pgfault and pgmajfault:** + indicate the number of times that a process of the cgroup triggered + a "page fault" and a "major fault", respectively. A page fault + happens when a process accesses a part of its virtual memory space + which is nonexistent or protected. The former can happen if the + process is buggy and tries to access an invalid address (it will + then be sent a `SIGSEGV` signal, typically + killing it with the famous `Segmentation fault` + message). The latter can happen when the process reads from a memory + zone which has been swapped out, or which corresponds to a mapped + file: in that case, the kernel will load the page from disk, and let + the CPU complete the memory access. It can also happen when the + process writes to a copy-on-write memory zone: likewise, the kernel + will preempt the process, duplicate the memory page, and resume the + write operation on the process` own copy of the page. "Major" faults + happen when the kernel actually has to read the data from disk. When + it just has to duplicate an existing page, or allocate an empty + page, it's a regular (or "minor") fault. + + - **swap:** + the amount of swap currently used by the processes in this cgroup. + + - **active_anon and inactive_anon:** + the amount of *anonymous* memory that has been identified has + respectively *active* and *inactive* by the kernel. "Anonymous" + memory is the memory that is *not* linked to disk pages. In other + words, that's the equivalent of the rss counter described above. In + fact, the very definition of the rss counter is **active_anon** + + **inactive_anon** - **tmpfs** (where tmpfs is the amount of memory + used up by `tmpfs` filesystems mounted by this + control group). Now, what's the difference between "active" and + "inactive"? Pages are initially "active"; and at regular intervals, + the kernel sweeps over the memory, and tags some pages as + "inactive". 
Whenever they are accessed again, they are immediately + retagged "active". When the kernel is almost out of memory, and time + comes to swap out to disk, the kernel will swap "inactive" pages. + + - **active_file and inactive_file:** + cache memory, with *active* and *inactive* similar to the *anon* + memory above. The exact formula is cache = **active_file** + + **inactive_file** + **tmpfs**. The exact rules used by the kernel + to move memory pages between active and inactive sets are different + from the ones used for anonymous memory, but the general principle + is the same. Note that when the kernel needs to reclaim memory, it + is cheaper to reclaim a clean (=non modified) page from this pool, + since it can be reclaimed immediately (while anonymous pages and + dirty/modified pages have to be written to disk first). + + - **unevictable:** + the amount of memory that cannot be reclaimed; generally, it will + account for memory that has been "locked" with `mlock`. + It is often used by crypto frameworks to make sure that + secret keys and other sensitive material never gets swapped out to + disk. + + - **memory and memsw limits:** + These are not really metrics, but a reminder of the limits applied + to this cgroup. The first one indicates the maximum amount of + physical memory that can be used by the processes of this control + group; the second one indicates the maximum amount of RAM+swap. Accounting for memory in the page cache is very complex. If two processes in different control groups both read the same file (ultimately relying on the same blocks on disk), the corresponding -memory charge will be split between the control groups. It’s nice, but +memory charge will be split between the control groups. It's nice, but it also means that when a cgroup is terminated, it could increase the memory usage of another cgroup, because they are not splitting the cost anymore for those memory pages. ### CPU metrics: `cpuacct.stat` -Now that we’ve covered memory metrics, everything else will look very +Now that we've covered memory metrics, everything else will look very simple in comparison. CPU metrics will be found in the `cpuacct` controller. For each container, you will find a pseudo-file `cpuacct.stat`, containing the CPU usage accumulated by the processes of the container, -broken down between `user` and `system` time. If you’re not familiar +broken down between `user` and `system` time. If you're not familiar with the distinction, `user` is the time during which the processes were in direct control of the CPU (i.e. executing process code), and `system` is the time during which the CPU was executing system calls on behalf of @@ -217,43 +220,47 @@ they are expressed in "user jiffies". There are `USER_HZ` *"jiffies"* per second, and on x86 systems, `USER_HZ` is 100. This used to map exactly to the number of scheduler "ticks" per second; but with the advent of higher -frequency scheduling, as well as [tickless -kernels](http://lwn.net/Articles/549580/), the number of kernel ticks -wasn’t relevant anymore. It stuck around anyway, mainly for legacy and +frequency scheduling, as well as [tickless kernels]( +http://lwn.net/Articles/549580/), the number of kernel ticks +wasn't relevant anymore. It stuck around anyway, mainly for legacy and compatibility reasons. ### Block I/O metrics Block I/O is accounted in the `blkio` controller. Different metrics are scattered across different files. 
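Both `memory.stat` and `cpuacct.stat` described above are flat `key value` pseudo-files, so collecting them needs very little code. A minimal sketch, assuming the `/sys/fs/cgroup/<subsystem>/lxc/<long-id>/` layout described earlier (the helper name is an assumption; the container ID is the example one from the text):

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "path/filepath"
        "strconv"
        "strings"
    )

    // readCgroupStat parses a flat "key value" pseudo-file such as
    // memory.stat or cpuacct.stat into a map of counters.
    func readCgroupStat(path string) (map[string]uint64, error) {
        f, err := os.Open(path)
        if err != nil {
            return nil, err
        }
        defer f.Close()

        stats := make(map[string]uint64)
        s := bufio.NewScanner(f)
        for s.Scan() {
            fields := strings.Fields(s.Text())
            if len(fields) != 2 {
                continue
            }
            if v, err := strconv.ParseUint(fields[1], 10, 64); err == nil {
                stats[fields[0]] = v
            }
        }
        return stats, s.Err()
    }

    func main() {
        // Long container ID, as shown by `docker ps -notrunc` in the text above.
        id := "ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79"

        mem, err := readCgroupStat(filepath.Join("/sys/fs/cgroup/memory/lxc", id, "memory.stat"))
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println("rss:", mem["rss"], "cache:", mem["cache"], "swap:", mem["swap"])

        cpu, err := readCgroupStat(filepath.Join("/sys/fs/cgroup/cpuacct/lxc", id, "cpuacct.stat"))
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        // Values are in "user jiffies" (USER_HZ, typically 100 per second on x86).
        fmt.Println("user:", cpu["user"], "system:", cpu["system"])
    }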
While you can -find in-depth details in the -[blkio-controller](https://www.kernel.org/doc/Documentation/cgroups/blkio-controller.txt) +find in-depth details in the [blkio-controller]( +https://www.kernel.org/doc/Documentation/cgroups/blkio-controller.txt) file in the kernel documentation, here is a short list of the most relevant ones: -blkio.sectors -: contain the number of 512-bytes sectors read and written by the - processes member of the cgroup, device by device. Reads and writes - are merged in a single counter. -blkio.io\_service\_bytes -: indicates the number of bytes read and written by the cgroup. It has - 4 counters per device, because for each device, it differentiates - between synchronous vs. asynchronous I/O, and reads vs. writes. -blkio.io\_serviced -: the number of I/O operations performed, regardless of their size. It - also has 4 counters per device. -blkio.io\_queued -: indicates the number of I/O operations currently queued for this - cgroup. In other words, if the cgroup isn’t doing any I/O, this will - be zero. Note that the opposite is not true. In other words, if - there is no I/O queued, it does not mean that the cgroup is idle - (I/O-wise). It could be doing purely synchronous reads on an - otherwise quiescent device, which is therefore able to handle them - immediately, without queuing. Also, while it is helpful to figure - out which cgroup is putting stress on the I/O subsystem, keep in - mind that is is a relative quantity. Even if a process group does - not perform more I/O, its queue size can increase just because the - device load increases because of other devices. + + - **blkio.sectors:** + contain the number of 512-bytes sectors read and written by the + processes member of the cgroup, device by device. Reads and writes + are merged in a single counter. + + - **blkio.io_service_bytes:** + indicates the number of bytes read and written by the cgroup. It has + 4 counters per device, because for each device, it differentiates + between synchronous vs. asynchronous I/O, and reads vs. writes. + + - **blkio.io_serviced:** + the number of I/O operations performed, regardless of their size. It + also has 4 counters per device. + + - **blkio.io_queued:** + indicates the number of I/O operations currently queued for this + cgroup. In other words, if the cgroup isn't doing any I/O, this will + be zero. Note that the opposite is not true. In other words, if + there is no I/O queued, it does not mean that the cgroup is idle + (I/O-wise). It could be doing purely synchronous reads on an + otherwise quiescent device, which is therefore able to handle them + immediately, without queuing. Also, while it is helpful to figure + out which cgroup is putting stress on the I/O subsystem, keep in + mind that is is a relative quantity. Even if a process group does + not perform more I/O, its queue size can increase just because the + device load increases because of other devices. ## Network Metrics @@ -261,9 +268,9 @@ Network metrics are not exposed directly by control groups. There is a good explanation for that: network interfaces exist within the context of *network namespaces*. The kernel could probably accumulate metrics about packets and bytes sent and received by a group of processes, but -those metrics wouldn’t be very useful. You want per-interface metrics +those metrics wouldn't be very useful. You want per-interface metrics (because traffic happening on the local `lo` -interface doesn’t really count). 
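The per-device `blkio` counters listed above use a different layout from the flat files handled in the previous sketch. The following is a hedged sketch of reading `blkio.io_service_bytes`, assuming the usual `major:minor operation value` lines with a trailing aggregate `Total` line, and the same cgroup-path layout as before:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strconv"
        "strings"
    )

    // readBlkioServiceBytes parses blkio.io_service_bytes into per-device,
    // per-operation byte counts (Read, Write, Sync, Async, Total). The final
    // aggregate "Total <bytes>" line has only two fields and is skipped.
    func readBlkioServiceBytes(path string) (map[string]map[string]uint64, error) {
        f, err := os.Open(path)
        if err != nil {
            return nil, err
        }
        defer f.Close()

        perDevice := make(map[string]map[string]uint64)
        s := bufio.NewScanner(f)
        for s.Scan() {
            fields := strings.Fields(s.Text())
            if len(fields) != 3 {
                continue
            }
            v, err := strconv.ParseUint(fields[2], 10, 64)
            if err != nil {
                continue
            }
            dev, op := fields[0], fields[1]
            if perDevice[dev] == nil {
                perDevice[dev] = make(map[string]uint64)
            }
            perDevice[dev][op] = v
        }
        return perDevice, s.Err()
    }

    func main() {
        id := "ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79"
        stats, err := readBlkioServiceBytes("/sys/fs/cgroup/blkio/lxc/" + id + "/blkio.io_service_bytes")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        for dev, ops := range stats {
            fmt.Printf("%s read=%d write=%d\n", dev, ops["Read"], ops["Write"])
        }
    }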
But since processes in a single cgroup +interface doesn't really count). But since processes in a single cgroup can belong to multiple network namespaces, those metrics would be harder to interpret: multiple network namespaces means multiple `lo` interfaces, potentially multiple `eth0` @@ -324,7 +331,7 @@ The `ip-netns exec` command will let you execute any program (present in the host system) within any network namespace visible to the current process. This means that your host will be able to enter the network namespace of your containers, but your containers -won’t be able to access the host, nor their sibling containers. +won't be able to access the host, nor their sibling containers. Containers will be able to “see” and affect their sub-containers, though. @@ -351,11 +358,9 @@ those pseudo-files. (Symlinks are accepted.) In other words, to execute a command within the network namespace of a container, we need to: -- Find out the PID of any process within the container that we want to - investigate; -- Create a symlink from `/var/run/netns/` - to `/proc//ns/net` -- Execute `ip netns exec ....` +- Find out the PID of any process within the container that we want to investigate; +- Create a symlink from `/var/run/netns/` to `/proc//ns/net` +- Execute `ip netns exec ....` Please review [*Enumerating Cgroups*](#enumerating-cgroups) to learn how to find the cgroup of a pprocess running in the container of which you want to @@ -386,7 +391,7 @@ write your metric collector in C (or any language that lets you do low-level system calls). You need to use a special system call, `setns()`, which lets the current process enter any arbitrary namespace. It requires, however, an open file descriptor to -the namespace pseudo-file (remember: that’s the pseudo-file in +the namespace pseudo-file (remember: that's the pseudo-file in `/proc//ns/net`). However, there is a catch: you must not keep this file descriptor open. @@ -409,26 +414,26 @@ carefully cleans up after itself, but it is still possible. It is usually easier to collect metrics at regular intervals (e.g. every minute, with the collectd LXC plugin) and rely on that instead. -But, if you’d still like to gather the stats when a container stops, +But, if you'd still like to gather the stats when a container stops, here is how: For each container, start a collection process, and move it to the control groups that you want to monitor by writing its PID to the tasks file of the cgroup. The collection process should periodically re-read -the tasks file to check if it’s the last process of the control group. +the tasks file to check if it's the last process of the control group. (If you also want to collect network statistics as explained in the previous section, you should also move the process to the appropriate network namespace.) When the container exits, `lxc-start` will try to delete the control groups. It will fail, since the control group is -still in use; but that’s fine. You process should now detect that it is +still in use; but that's fine. You process should now detect that it is the only one remaining in the group. Now is the right time to collect all the metrics you need! Finally, your process should move itself back to the root control group, and remove the container control group. To remove a control group, just -`rmdir` its directory. It’s counter-intuitive to +`rmdir` its directory. It's counter-intuitive to `rmdir` a directory as it still contains files; but -remember that this is a pseudo-filesystem, so usual rules don’t apply. 
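The symlink-plus-`ip netns exec` steps listed above can be scripted directly. A hedged sketch follows (it must run as root; the helper name, the example PID, and the choice of `netstat -i` as the command are illustrative, since any program present on the host would do):

    package main

    import (
        "fmt"
        "os"
        "os/exec"
        "path/filepath"
    )

    // inContainerNetns runs a command inside a container's network namespace
    // by exposing /proc/<pid>/ns/net under /var/run/netns, as described above.
    func inContainerNetns(pid int, name string, command ...string) ([]byte, error) {
        if err := os.MkdirAll("/var/run/netns", 0755); err != nil {
            return nil, err
        }
        target := fmt.Sprintf("/proc/%d/ns/net", pid)
        link := filepath.Join("/var/run/netns", name)
        if err := os.Symlink(target, link); err != nil {
            return nil, err
        }
        // Keep /var/run/netns tidy once we are done.
        defer os.Remove(link)

        args := append([]string{"netns", "exec", name}, command...)
        return exec.Command("ip", args...).CombinedOutput()
    }

    func main() {
        // Hypothetical PID of any process running inside the container.
        out, err := inContainerNetns(4321, "mycontainer", "netstat", "-i")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        os.Stdout.Write(out)
    }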
+remember that this is a pseudo-filesystem, so usual rules don't apply. After the cleanup is done, the collection process can exit safely. diff --git a/docs/sources/articles/security.md b/docs/sources/articles/security.md index 1a438295e7..4519248015 100644 --- a/docs/sources/articles/security.md +++ b/docs/sources/articles/security.md @@ -9,11 +9,11 @@ page_keywords: Docker, Docker documentation, security There are three major areas to consider when reviewing Docker security: -- the intrinsic security of containers, as implemented by kernel - namespaces and cgroups; -- the attack surface of the Docker daemon itself; -- the "hardening" security features of the kernel and how they - interact with containers. + - the intrinsic security of containers, as implemented by kernel + namespaces and cgroups; + - the attack surface of the Docker daemon itself; + - the "hardening" security features of the kernel and how they + interact with containers. ## Kernel Namespaces @@ -33,7 +33,7 @@ less affect, processes running in another container, or in the host system. **Each container also gets its own network stack**, meaning that a -container doesn’t get a privileged access to the sockets or interfaces +container doesn't get a privileged access to the sockets or interfaces of another container. Of course, if the host system is setup accordingly, containers can interact with each other through their respective network interfaces — just like they can interact with @@ -54,8 +54,8 @@ This means that since July 2008 (date of the 2.6.26 release, now 5 years ago), namespace code has been exercised and scrutinized on a large number of production systems. And there is more: the design and inspiration for the namespaces code are even older. Namespaces are -actually an effort to reimplement the features of -[OpenVZ](http://en.wikipedia.org/wiki/OpenVZ) in such a way that they +actually an effort to reimplement the features of [OpenVZ]( +http://en.wikipedia.org/wiki/OpenVZ) in such a way that they could be merged within the mainstream kernel. And OpenVZ was initially released in 2005, so both the design and the implementation are pretty mature. @@ -90,11 +90,10 @@ Docker daemon**. This is a direct consequence of some powerful Docker features. Specifically, Docker allows you to share a directory between the Docker host and a guest container; and it allows you to do so without limiting the access rights of the container. This means that you -can start a container where the `/host` directory -will be the `/` directory on your host; and the -container will be able to alter your host filesystem without any -restriction. This sounds crazy? Well, you have to know that **all -virtualization systems allowing filesystem resource sharing behave the +can start a container where the `/host` directory will be the `/` directory +on your host; and the container will be able to alter your host filesystem +without any restriction. This sounds crazy? Well, you have to know that +**all virtualization systems allowing filesystem resource sharing behave the same way**. Nothing prevents you from sharing your root filesystem (or even your root block device) with a virtual machine. @@ -120,8 +119,8 @@ and client SSL certificates. Recent improvements in Linux namespaces will soon allow to run full-featured containers without root privileges, thanks to the new user -namespace. This is covered in detail -[here](http://s3hh.wordpress.com/2013/07/19/creating-and-using-containers-without-privilege/). +namespace. 
This is covered in detail [here]( +http://s3hh.wordpress.com/2013/07/19/creating-and-using-containers-without-privilege/). Moreover, this will solve the problem caused by sharing filesystems between host and guest, since the user namespace allows users within containers (including the root user) to be mapped to other users in the @@ -130,13 +129,13 @@ host system. The end goal for Docker is therefore to implement two additional security improvements: -- map the root user of a container to a non-root user of the Docker - host, to mitigate the effects of a container-to-host privilege - escalation; -- allow the Docker daemon to run without root privileges, and delegate - operations requiring those privileges to well-audited sub-processes, - each with its own (very limited) scope: virtual network setup, - filesystem management, etc. + - map the root user of a container to a non-root user of the Docker + host, to mitigate the effects of a container-to-host privilege + escalation; + - allow the Docker daemon to run without root privileges, and delegate + operations requiring those privileges to well-audited sub-processes, + each with its own (very limited) scope: virtual network setup, + filesystem management, etc. Finally, if you run Docker on a server, it is recommended to run exclusively Docker in the server, and move all other services within @@ -152,11 +151,11 @@ capabilities. What does that mean? Capabilities turn the binary "root/non-root" dichotomy into a fine-grained access control system. Processes (like web servers) that just need to bind on a port below 1024 do not have to run as root: they -can just be granted the `net_bind_service` -capability instead. And there are many other capabilities, for almost -all the specific areas where root privileges are usually needed. +can just be granted the `net_bind_service` capability instead. And there +are many other capabilities, for almost all the specific areas where root +privileges are usually needed. -This means a lot for container security; let’s see why! +This means a lot for container security; let's see why! Your average server (bare metal or virtual machine) needs to run a bunch of processes as root. Those typically include SSH, cron, syslogd; @@ -165,41 +164,41 @@ tools (to handle e.g. DHCP, WPA, or VPNs), and much more. A container is very different, because almost all of those tasks are handled by the infrastructure around the container: -- SSH access will typically be managed by a single server running in - the Docker host; -- `cron`, when necessary, should run as a user - process, dedicated and tailored for the app that needs its - scheduling service, rather than as a platform-wide facility; -- log management will also typically be handed to Docker, or by - third-party services like Loggly or Splunk; -- hardware management is irrelevant, meaning that you never need to - run `udevd` or equivalent daemons within - containers; -- network management happens outside of the containers, enforcing - separation of concerns as much as possible, meaning that a container - should never need to perform `ifconfig`, - `route`, or ip commands (except when a container - is specifically engineered to behave like a router or firewall, of - course). 
+ - SSH access will typically be managed by a single server running in + the Docker host; + - `cron`, when necessary, should run as a user + process, dedicated and tailored for the app that needs its + scheduling service, rather than as a platform-wide facility; + - log management will also typically be handed to Docker, or by + third-party services like Loggly or Splunk; + - hardware management is irrelevant, meaning that you never need to + run `udevd` or equivalent daemons within + containers; + - network management happens outside of the containers, enforcing + separation of concerns as much as possible, meaning that a container + should never need to perform `ifconfig`, + `route`, or ip commands (except when a container + is specifically engineered to behave like a router or firewall, of + course). This means that in most cases, containers will not need "real" root privileges *at all*. And therefore, containers can run with a reduced capability set; meaning that "root" within a container has much less privileges than the real "root". For instance, it is possible to: -- deny all "mount" operations; -- deny access to raw sockets (to prevent packet spoofing); -- deny access to some filesystem operations, like creating new device - nodes, changing the owner of files, or altering attributes - (including the immutable flag); -- deny module loading; -- and many others. + - deny all "mount" operations; + - deny access to raw sockets (to prevent packet spoofing); + - deny access to some filesystem operations, like creating new device + nodes, changing the owner of files, or altering attributes (including + the immutable flag); + - deny module loading; + - and many others. This means that even if an intruder manages to escalate to root within a container, it will be much harder to do serious damage, or to escalate to the host. -This won’t affect regular web apps; but malicious users will find that +This won't affect regular web apps; but malicious users will find that the arsenal at their disposal has shrunk considerably! You can see [the list of dropped capabilities in the Docker code](https://github.com/dotcloud/docker/blob/v0.5.0/lxc_template.go#L97), @@ -217,28 +216,28 @@ modern Linux kernels. It is also possible to leverage existing, well-known systems like TOMOYO, AppArmor, SELinux, GRSEC, etc. with Docker. -While Docker currently only enables capabilities, it doesn’t interfere +While Docker currently only enables capabilities, it doesn't interfere with the other systems. This means that there are many different ways to harden a Docker host. Here are a few examples. -- You can run a kernel with GRSEC and PAX. This will add many safety - checks, both at compile-time and run-time; it will also defeat many - exploits, thanks to techniques like address randomization. It - doesn’t require Docker-specific configuration, since those security - features apply system-wide, independently of containers. -- If your distribution comes with security model templates for LXC - containers, you can use them out of the box. For instance, Ubuntu - comes with AppArmor templates for LXC, and those templates provide - an extra safety net (even though it overlaps greatly with - capabilities). -- You can define your own policies using your favorite access control - mechanism. Since Docker containers are standard LXC containers, - there is nothing “magic” or specific to Docker. + - You can run a kernel with GRSEC and PAX. 
This will add many safety + checks, both at compile-time and run-time; it will also defeat many + exploits, thanks to techniques like address randomization. It + doesn't require Docker-specific configuration, since those security + features apply system-wide, independently of containers. + - If your distribution comes with security model templates for LXC + containers, you can use them out of the box. For instance, Ubuntu + comes with AppArmor templates for LXC, and those templates provide + an extra safety net (even though it overlaps greatly with + capabilities). + - You can define your own policies using your favorite access control + mechanism. Since Docker containers are standard LXC containers, + there is nothing “magic” or specific to Docker. Just like there are many third-party tools to augment Docker containers with e.g. special network topologies or shared filesystems, you can expect to see tools to harden existing Docker containers without -affecting Docker’s core. +affecting Docker's core. ## Conclusions @@ -254,5 +253,5 @@ containerization systems, you will be able to implement them as well with Docker, since everything is provided by the kernel anyway. For more context and especially for comparisons with VMs and other -container systems, please also see the [original blog -post](http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/). +container systems, please also see the [original blog post]( +http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/). diff --git a/docs/sources/contributing.md b/docs/sources/contributing.md index b311d13f8c..0a1e4fd282 100644 --- a/docs/sources/contributing.md +++ b/docs/sources/contributing.md @@ -2,6 +2,6 @@ ## Contents: -- [Contributing to Docker](contributing/) -- [Setting Up a Dev Environment](devenvironment/) + - [Contributing to Docker](contributing/) + - [Setting Up a Dev Environment](devenvironment/) diff --git a/docs/sources/contributing/contributing.md b/docs/sources/contributing/contributing.md index 9e2ad19073..dd764eb855 100644 --- a/docs/sources/contributing/contributing.md +++ b/docs/sources/contributing/contributing.md @@ -6,19 +6,19 @@ page_keywords: contributing, docker, documentation, help, guideline Want to hack on Docker? Awesome! -The repository includes [all the instructions you need to get -started](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md). +The repository includes [all the instructions you need to get started]( +https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md). -The [developer environment -Dockerfile](https://github.com/dotcloud/docker/blob/master/Dockerfile) +The [developer environment Dockerfile]( +https://github.com/dotcloud/docker/blob/master/Dockerfile) specifies the tools and versions used to test and build Docker. -If you’re making changes to the documentation, see the -[README.md](https://github.com/dotcloud/docker/blob/master/docs/README.md). +If you're making changes to the documentation, see the [README.md]( +https://github.com/dotcloud/docker/blob/master/docs/README.md). -The [documentation environment -Dockerfile](https://github.com/dotcloud/docker/blob/master/docs/Dockerfile) +The [documentation environment Dockerfile]( +https://github.com/dotcloud/docker/blob/master/docs/Dockerfile) specifies the tools and versions used to build the Documentation. -Further interesting details can be found in the [Packaging -hints](https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md). 
+Further interesting details can be found in the [Packaging hints]( +https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md). diff --git a/docs/sources/contributing/devenvironment.md b/docs/sources/contributing/devenvironment.md index 6551d9fbac..f7c66274e8 100644 --- a/docs/sources/contributing/devenvironment.md +++ b/docs/sources/contributing/devenvironment.md @@ -12,18 +12,18 @@ binaries, go environment, go dependencies, etc. ## Install Docker -Docker’s build environment itself is a Docker container, so the first +Docker's build environment itself is a Docker container, so the first step is to install Docker on your system. You can follow the [install instructions most relevant to your -system](https://docs.docker.io/en/latest/installation/). Make sure you +system](https://docs.docker.io/installation/). Make sure you have a working, up-to-date docker installation, then continue to the next step. ## Install tools used for this tutorial -Install `git`; honest, it’s very good. You can use -other ways to get the Docker source, but they’re not anywhere near as +Install `git`; honest, it's very good. You can use +other ways to get the Docker source, but they're not anywhere near as easy. Install `make`. This tutorial uses our base Makefile @@ -56,8 +56,7 @@ To create the Docker binary, run this command: sudo make binary -This will create the Docker binary in -`./bundles/-dev/binary/` +This will create the Docker binary in `./bundles/-dev/binary/` ### Using your built Docker binary @@ -107,10 +106,10 @@ something like this ok github.com/dotcloud/docker/utils 0.017s If $TESTFLAGS is set in the environment, it is passed as extra -arguments to ‘go test’. You can use this to select certain tests to run, +arguments to `go test`. You can use this to select certain tests to run, eg. -> TESTFLAGS=’-run \^TestBuild\$’ make test + TESTFLAGS=`-run \^TestBuild\$` make test If the output indicates "FAIL" and you see errors like this: @@ -118,7 +117,7 @@ If the output indicates "FAIL" and you see errors like this: utils_test.go:179: Error copy: exit status 1 (cp: writing '/tmp/docker-testd5c9-[...]': No space left on device -Then you likely don’t have enough memory available the test suite. 2GB +Then you likely don't have enough memory available the test suite. 2GB is recommended. ## Use Docker @@ -135,13 +134,14 @@ If you want to read the documentation from a local website, or are making changes to it, you can build the documentation and then serve it by: - sudo make docs + sudo make docs + # when its done, you can point your browser to http://yourdockerhost:8000 - # type Ctrl-C to exit + # type Ctrl-C to exit **Need More Help?** -If you need more help then hop on to the [\#docker-dev IRC +If you need more help then hop on to the [#docker-dev IRC channel](irc://chat.freenode.net#docker-dev) or post a message on the [Docker developer mailing list](https://groups.google.com/d/forum/docker-dev). diff --git a/docs/sources/examples.md b/docs/sources/examples.md index 98b3d25893..f1d1567f52 100644 --- a/docs/sources/examples.md +++ b/docs/sources/examples.md @@ -9,17 +9,17 @@ substantial services like those which you might find in production. 
## Contents: -- [Check your Docker install](hello_world/) -- [Hello World](hello_world/#hello-world) -- [Hello World Daemon](hello_world/#hello-world-daemon) -- [Node.js Web App](nodejs_web_app/) -- [Redis Service](running_redis_service/) -- [SSH Daemon Service](running_ssh_service/) -- [CouchDB Service](couchdb_data_volumes/) -- [PostgreSQL Service](postgresql_service/) -- [Building an Image with MongoDB](mongodb/) -- [Riak Service](running_riak_service/) -- [Using Supervisor with Docker](using_supervisord/) -- [Process Management with CFEngine](cfengine_process_management/) -- [Python Web App](python_web_app/) + - [Check your Docker install](hello_world/) + - [Hello World](hello_world/#hello-world) + - [Hello World Daemon](hello_world/#hello-world-daemon) + - [Node.js Web App](nodejs_web_app/) + - [Redis Service](running_redis_service/) + - [SSH Daemon Service](running_ssh_service/) + - [CouchDB Service](couchdb_data_volumes/) + - [PostgreSQL Service](postgresql_service/) + - [Building an Image with MongoDB](mongodb/) + - [Riak Service](running_riak_service/) + - [Using Supervisor with Docker](using_supervisord/) + - [Process Management with CFEngine](cfengine_process_management/) + - [Python Web App](python_web_app/) diff --git a/docs/sources/examples/apt-cacher-ng.md b/docs/sources/examples/apt-cacher-ng.md index c7fee5542a..bfcf1ed232 100644 --- a/docs/sources/examples/apt-cacher-ng.md +++ b/docs/sources/examples/apt-cacher-ng.md @@ -9,13 +9,13 @@ page_keywords: docker, example, package installation, networking, debian, ubuntu > - This example assumes you have Docker running in daemon mode. For > more information please see [*Check your Docker > install*](../hello_world/#running-examples). -> - **If you don’t like sudo** then see [*Giving non-root +> - **If you don't like sudo** then see [*Giving non-root > access*](../../installation/binaries/#dockergroup). -> - **If you’re using OS X or docker via TCP** then you shouldn’t use +> - **If you're using OS X or docker via TCP** then you shouldn't use > sudo. When you have multiple Docker servers, or build unrelated Docker -containers which can’t make use of the Docker build cache, it can be +containers which can't make use of the Docker build cache, it can be useful to have a caching proxy for your packages. This container makes the second download of any package almost instant. @@ -45,7 +45,7 @@ Then run it, mapping the exposed port to one on the host $ sudo docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng -To see the logfiles that are ‘tailed’ in the default command, you can +To see the logfiles that are `tailed` in the default command, you can use: $ sudo docker logs -f test_apt_cacher_ng @@ -53,13 +53,12 @@ use: To get your Debian-based containers to use the proxy, you can do one of three things -1. Add an apt Proxy setting - `echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/conf.d/01proxy` - -2. Set an environment variable: - `http_proxy=http://dockerhost:3142/` -3. Change your `sources.list` entries to start with - `http://dockerhost:3142/` +1. Add an apt Proxy setting + `echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/conf.d/01proxy` +2. Set an environment variable: + `http_proxy=http://dockerhost:3142/` +3. 
Change your `sources.list` entries to start with + `http://dockerhost:3142/` **Option 1** injects the settings safely into your apt configuration in a local version of a common base: diff --git a/docs/sources/examples/cfengine_process_management.md b/docs/sources/examples/cfengine_process_management.md index 45d6edcec4..965ad252d2 100644 --- a/docs/sources/examples/cfengine_process_management.md +++ b/docs/sources/examples/cfengine_process_management.md @@ -10,14 +10,14 @@ Docker monitors one process in each running container and the container lives or dies with that process. By introducing CFEngine inside Docker containers, we can alleviate a few of the issues that may arise: -- It is possible to easily start multiple processes within a - container, all of which will be managed automatically, with the - normal `docker run` command. -- If a managed process dies or crashes, CFEngine will start it again - within 1 minute. -- The container itself will live as long as the CFEngine scheduling - daemon (cf-execd) lives. With CFEngine, we are able to decouple the - life of the container from the uptime of the service it provides. + - It is possible to easily start multiple processes within a + container, all of which will be managed automatically, with the + normal `docker run` command. + - If a managed process dies or crashes, CFEngine will start it again + within 1 minute. + - The container itself will live as long as the CFEngine scheduling + daemon (cf-execd) lives. With CFEngine, we are able to decouple the + life of the container from the uptime of the service it provides. ## How it works @@ -25,23 +25,20 @@ CFEngine, together with the cfe-docker integration policies, are installed as part of the Dockerfile. This builds CFEngine into our Docker image. -The Dockerfile’s `ENTRYPOINT` takes an arbitrary +The Dockerfile's `ENTRYPOINT` takes an arbitrary amount of commands (with any desired arguments) as parameters. When we run the Docker container these parameters get written to CFEngine policies and CFEngine takes over to ensure that the desired processes are running in the container. -CFEngine scans the process table for the `basename` -of the commands given to the `ENTRYPOINT` and runs -the command to start the process if the `basename` +CFEngine scans the process table for the `basename` of the commands given +to the `ENTRYPOINT` and runs the command to start the process if the `basename` is not found. For example, if we start the container with -`docker run "/path/to/my/application parameters"`, -CFEngine will look for a process named `application` -and run the command. If an entry for `application` -is not found in the process table at any point in time, CFEngine will -execute `/path/to/my/application parameters` to -start the application once again. The check on the process table happens -every minute. +`docker run "/path/to/my/application parameters"`, CFEngine will look for a +process named `application` and run the command. If an entry for `application` +is not found in the process table at any point in time, CFEngine will execute +`/path/to/my/application parameters` to start the application once again. The +check on the process table happens every minute. Note that it is therefore important that the command to start your application leaves a process with the basename of the command. This can @@ -56,11 +53,10 @@ in a single container. There are three steps: -1. Install CFEngine into the container. -2. 
Copy the CFEngine Docker process management policy into the - containerized CFEngine installation. -3. Start your application processes as part of the - `docker run` command. +1. Install CFEngine into the container. +2. Copy the CFEngine Docker process management policy into the + containerized CFEngine installation. +3. Start your application processes as part of the `docker run` command. ### Building the container image @@ -90,25 +86,22 @@ The first two steps can be done as part of a Dockerfile, as follows. ENTRYPOINT ["/var/cfengine/bin/docker_processes_run.sh"] -By saving this file as `Dockerfile` to a working -directory, you can then build your container with the docker build -command, e.g. `docker build -t managed_image`. +By saving this file as Dockerfile to a working directory, you can then build +your container with the docker build command, e.g. +`docker build -t managed_image`. ### Testing the container -Start the container with `apache2` and -`sshd` running and managed, forwarding a port to our -SSH instance: +Start the container with `apache2` and `sshd` running and managed, forwarding +a port to our SSH instance: docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start" We now clearly see one of the benefits of the cfe-docker integration: it -allows to start several processes as part of a normal -`docker run` command. +allows to start several processes as part of a normal `docker run` command. -We can now log in to our new container and see that both -`apache2` and `sshd` are -running. We have set the root password to "password" in the Dockerfile +We can now log in to our new container and see that both `apache2` and `sshd` +are running. We have set the root password to "password" in the Dockerfile above and can use that to log in with ssh: ssh -p222 root@127.0.0.1 @@ -144,9 +137,8 @@ CFEngine. To make sure your applications get managed in the same manner, there are just two things you need to adjust from the above example: -- In the Dockerfile used above, install your applications instead of - `apache2` and `sshd`. -- When you start the container with `docker run`, - specify the command line arguments to your applications rather than - `apache2` and `sshd`. - + - In the Dockerfile used above, install your applications instead of + `apache2` and `sshd`. + - When you start the container with `docker run`, + specify the command line arguments to your applications rather than + `apache2` and `sshd`. diff --git a/docs/sources/examples/couchdb_data_volumes.md b/docs/sources/examples/couchdb_data_volumes.md index 1b18cf0aa7..6af8e2fd1e 100644 --- a/docs/sources/examples/couchdb_data_volumes.md +++ b/docs/sources/examples/couchdb_data_volumes.md @@ -9,23 +9,22 @@ page_keywords: docker, example, package installation, networking, couchdb, data > - This example assumes you have Docker running in daemon mode. For > more information please see [*Check your Docker > install*](../hello_world/#running-examples). -> - **If you don’t like sudo** then see [*Giving non-root +> - **If you don't like sudo** then see [*Giving non-root > access*](../../installation/binaries/#dockergroup) -Here’s an example of using data volumes to share the same data between +Here's an example of using data volumes to share the same data between two CouchDB containers. This could be used for hot upgrades, testing different versions of CouchDB on the same data, etc. ## Create first database -Note that we’re marking `/var/lib/couchdb` as a data -volume. 
+Note that we're marking `/var/lib/couchdb` as a data volume. COUCH1=$(sudo docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03) ## Add data to the first database -We’re assuming your Docker host is reachable at `localhost`. If not, +We're assuming your Docker host is reachable at `localhost`. If not, replace `localhost` with the public IP of your Docker host. HOST=localhost @@ -34,7 +33,7 @@ replace `localhost` with the public IP of your Docker host. ## Create second database -This time, we’re requesting shared access to `$COUCH1`'s volumes. +This time, we're requesting shared access to `$COUCH1`'s volumes. COUCH2=$(sudo docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03) diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md index 062d5d37b3..9bcc619896 100644 --- a/docs/sources/examples/hello_world.md +++ b/docs/sources/examples/hello_world.md @@ -25,7 +25,7 @@ for installation instructions. > - This example assumes you have Docker running in daemon mode. For > more information please see [*Check your Docker > install*](#check-your-docker-installation). -> - **If you don’t like sudo** then see [*Giving non-root +> - **If you don't like sudo** then see [*Giving non-root > access*](../../installation/binaries/#dockergroup) This is the most basic example available for using Docker. @@ -61,7 +61,6 @@ See the example in action - ## Hello World Daemon @@ -71,7 +70,7 @@ See the example in action > - This example assumes you have Docker running in daemon mode. For > more information please see [*Check your Docker > install*](#check-your-docker-installation). -> - **If you don’t like sudo** then see [*Giving non-root +> - **If you don't like sudo** then see [*Giving non-root > access*](../../installation/binaries/#dockergroup) And now for the most boring daemon ever written! @@ -87,16 +86,16 @@ continue to do this until we stop it. We are going to run a simple hello world daemon in a new container made from the `ubuntu` image. -- **"sudo docker run -d "** run a command in a new container. We pass - "-d" so it runs as a daemon. -- **"ubuntu"** is the image we want to run the command inside of. -- **"/bin/sh -c"** is the command we want to run in the container -- **"while true; do echo hello world; sleep 1; done"** is the mini - script we want to run, that will just print hello world once a - second until we stop it. -- **$container_id** the output of the run command will return a - container id, we can use in future commands to see what is going on - with this process. + - **"sudo docker run -d "** run a command in a new container. We pass + "-d" so it runs as a daemon. + - **"ubuntu"** is the image we want to run the command inside of. + - **"/bin/sh -c"** is the command we want to run in the container + - **"while true; do echo hello world; sleep 1; done"** is the mini + script we want to run, that will just print hello world once a + second until we stop it. + - **$container_id** the output of the run command will return a + container id, we can use in future commands to see what is going on + with this process. @@ -104,8 +103,8 @@ from the `ubuntu` image. Check the logs make sure it is working correctly. -- **"docker logs**" This will return the logs for a container -- **$container_id** The Id of the container we want the logs for. + - **"docker logs**" This will return the logs for a container + - **$container_id** The Id of the container we want the logs for. 
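Putting the run and logs steps together, a minimal session looks roughly like this (the container id returned on your machine will differ):

    container_id=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done")
    sudo docker logs $container_id
    # prints one "hello world" line for every second the container has been running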
@@ -113,12 +112,12 @@ Check the logs make sure it is working correctly. Attach to the container to see the results in real-time. -- **"docker attach**" This will allow us to attach to a background - process to see what is going on. -- **"–sig-proxy=false"** Do not forward signals to the container; - allows us to exit the attachment using Control-C without stopping - the container. -- **$container_id** The Id of the container we want to attach to. + - **"docker attach**" This will allow us to attach to a background + process to see what is going on. + - **"–sig-proxy=false"** Do not forward signals to the container; + allows us to exit the attachment using Control-C without stopping + the container. + - **$container_id** The Id of the container we want to attach to. Exit from the container attachment by pressing Control-C. @@ -126,16 +125,16 @@ Exit from the container attachment by pressing Control-C. Check the process list to make sure it is running. -- **"docker ps"** this shows all running process managed by docker + - **"docker ps"** this shows all running process managed by docker sudo docker stop $container_id -Stop the container, since we don’t need it anymore. +Stop the container, since we don't need it anymore. -- **"docker stop"** This stops a container -- **$container_id** The Id of the container we want to stop. + - **"docker stop"** This stops a container + - **$container_id** The Id of the container we want to stop. @@ -151,16 +150,14 @@ See the example in action -The next example in the series is a [*Node.js Web -App*](../nodejs_web_app/#nodejs-web-app) example, or you could skip to -any of the other examples: - -- [*Node.js Web App*](../nodejs_web_app/#nodejs-web-app) -- [*Redis Service*](../running_redis_service/#running-redis-service) -- [*SSH Daemon Service*](../running_ssh_service/#running-ssh-service) -- [*CouchDB - Service*](../couchdb_data_volumes/#running-couchdb-service) -- [*PostgreSQL Service*](../postgresql_service/#postgresql-service) -- [*Building an Image with MongoDB*](../mongodb/#mongodb-image) -- [*Python Web App*](../python_web_app/#python-web-app) +The next example in the series is a [*Node.js Web App*]( +../nodejs_web_app/#nodejs-web-app) example, or you could skip to any of the +other examples: + - [*Node.js Web App*](../nodejs_web_app/#nodejs-web-app) + - [*Redis Service*](../running_redis_service/#running-redis-service) + - [*SSH Daemon Service*](../running_ssh_service/#running-ssh-service) + - [*CouchDB Service*](../couchdb_data_volumes/#running-couchdb-service) + - [*PostgreSQL Service*](../postgresql_service/#postgresql-service) + - [*Building an Image with MongoDB*](../mongodb/#mongodb-image) + - [*Python Web App*](../python_web_app/#python-web-app) diff --git a/docs/sources/examples/https.md b/docs/sources/examples/https.md index 153a6c0cf9..c46cf6b88c 100644 --- a/docs/sources/examples/https.md +++ b/docs/sources/examples/https.md @@ -8,7 +8,7 @@ By default, Docker runs via a non-networked Unix socket. It can also optionally communicate using a HTTP socket. If you need Docker reachable via the network in a safe manner, you can -enable TLS by specifying the tlsverify flag and pointing Docker’s +enable TLS by specifying the tlsverify flag and pointing Docker's tlscacert flag to a trusted CA certificate. In daemon mode, it will only allow connections from clients @@ -31,12 +31,12 @@ keys: Now that we have a CA, you can create a server key and certificate signing request. Make sure that "Common Name (e.g. 
server FQDN or YOUR name)" matches the hostname you will use to connect to Docker or just -use ‘\*’ for a certificate valid for any hostname: +use `\*` for a certificate valid for any hostname: $ openssl genrsa -des3 -out server-key.pem $ openssl req -new -key server-key.pem -out server.csr -Next we’re going to sign the key with our CA: +Next we're going to sign the key with our CA: $ openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem \ -out server-cert.pem @@ -76,7 +76,7 @@ need to provide your client keys, certificates and trusted CA: -H=dns-name-of-docker-host:4243 > **Warning**: -> As shown in the example above, you don’t have to run the +> As shown in the example above, you don't have to run the > `docker` client with `sudo` or > the `docker` group when you use certificate > authentication. That means anyone with the keys can give any @@ -86,22 +86,22 @@ need to provide your client keys, certificates and trusted CA: ## Other modes -If you don’t want to have complete two-way authentication, you can run +If you don't want to have complete two-way authentication, you can run Docker in various other modes by mixing the flags. ### Daemon modes -- tlsverify, tlscacert, tlscert, tlskey set: Authenticate clients -- tls, tlscert, tlskey: Do not authenticate clients + - tlsverify, tlscacert, tlscert, tlskey set: Authenticate clients + - tls, tlscert, tlskey: Do not authenticate clients ### Client modes -- tls: Authenticate server based on public/default CA pool -- tlsverify, tlscacert: Authenticate server based on given CA -- tls, tlscert, tlskey: Authenticate with client certificate, do not - authenticate server based on given CA -- tlsverify, tlscacert, tlscert, tlskey: Authenticate with client - certificate, authenticate server based on given CA + - tls: Authenticate server based on public/default CA pool + - tlsverify, tlscacert: Authenticate server based on given CA + - tls, tlscert, tlskey: Authenticate with client certificate, do not + authenticate server based on given CA + - tlsverify, tlscacert, tlscert, tlskey: Authenticate with client + certificate, authenticate server based on given CA The client will send its client certificate if found, so you just need -to drop your keys into \~/.docker/\.pem +to drop your keys into ~/.docker/.pem diff --git a/docs/sources/examples/mongodb.md b/docs/sources/examples/mongodb.md index c9078419d6..bf907891da 100644 --- a/docs/sources/examples/mongodb.md +++ b/docs/sources/examples/mongodb.md @@ -9,57 +9,57 @@ page_keywords: docker, example, package installation, networking, mongodb > - This example assumes you have Docker running in daemon mode. For > more information please see [*Check your Docker > install*](../hello_world/#running-examples). -> - **If you don’t like sudo** then see [*Giving non-root +> - **If you don't like sudo** then see [*Giving non-root > access*](../../installation/binaries/#dockergroup) The goal of this example is to show how you can build your own Docker images with MongoDB pre-installed. We will do that by constructing a -`Dockerfile` that downloads a base image, adds an +Dockerfile that downloads a base image, adds an apt source and installs the database software on Ubuntu. -## Creating a `Dockerfile` +## Creating a Dockerfile -Create an empty file called `Dockerfile`: +Create an empty file called Dockerfile: touch Dockerfile Next, define the parent image you want to use to build your own image on -top of. Here, we’ll use [Ubuntu](https://index.docker.io/_/ubuntu/) +top of. 
Here, we'll use [Ubuntu](https://index.docker.io/_/ubuntu/) (tag: `latest`) available on the [docker index](http://index.docker.io): FROM ubuntu:latest -Since we want to be running the latest version of MongoDB we’ll need to +Since we want to be running the latest version of MongoDB we'll need to add the 10gen repo to our apt sources list. # Add 10gen official apt source to the sources list RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list -Then, we don’t want Ubuntu to complain about init not being available so -we’ll divert `/sbin/initctl` to +Then, we don't want Ubuntu to complain about init not being available so +we'll divert `/sbin/initctl` to `/bin/true` so it thinks everything is working. # Hack for initctl not being available in Ubuntu RUN dpkg-divert --local --rename --add /sbin/initctl RUN ln -s /bin/true /sbin/initctl -Afterwards we’ll be able to update our apt repositories and install +Afterwards we'll be able to update our apt repositories and install MongoDB # Install MongoDB RUN apt-get update RUN apt-get install mongodb-10gen -To run MongoDB we’ll have to create the default data directory (because +To run MongoDB we'll have to create the default data directory (because we want it to run without needing to provide a special configuration file) # Create the MongoDB data directory RUN mkdir -p /data/db -Finally, we’ll expose the standard port that MongoDB runs on, 27107, as +Finally, we'll expose the standard port that MongoDB runs on, 27107, as well as define an `ENTRYPOINT` instruction for the container. @@ -67,7 +67,7 @@ container. ENTRYPOINT ["usr/bin/mongod"] Now, lets build the image which will go through the -`Dockerfile` we made and run all of the commands. +Dockerfile we made and run all of the commands. sudo docker build -t /mongodb . diff --git a/docs/sources/examples/nodejs_web_app.md b/docs/sources/examples/nodejs_web_app.md index 77d75047b6..0c04836b98 100644 --- a/docs/sources/examples/nodejs_web_app.md +++ b/docs/sources/examples/nodejs_web_app.md @@ -9,7 +9,7 @@ page_keywords: docker, example, package installation, node, centos > - This example assumes you have Docker running in daemon mode. For > more information please see [*Check your Docker > install*](../hello_world/#running-examples). -> - **If you don’t like sudo** then see [*Giving non-root +> - **If you don't like sudo** then see [*Giving non-root > access*](../../installation/binaries/#dockergroup) The goal of this example is to show you how you can build your own @@ -52,11 +52,11 @@ app using the [Express.js](http://expressjs.com/) framework: app.listen(PORT); console.log('Running on http://localhost:' + PORT); -In the next steps, we’ll look at how you can run this app inside a -CentOS container using Docker. First, you’ll need to build a Docker +In the next steps, we'll look at how you can run this app inside a +CentOS container using Docker. First, you'll need to build a Docker image of your app. -## Creating a `Dockerfile` +## Creating a Dockerfile Create an empty file called `Dockerfile`: @@ -69,47 +69,44 @@ requires to build (this example uses Docker 0.3.4): # DOCKER-VERSION 0.3.4 Next, define the parent image you want to use to build your own image on -top of. Here, we’ll use [CentOS](https://index.docker.io/_/centos/) +top of. 
Here, we'll use [CentOS](https://index.docker.io/_/centos/) (tag: `6.4`) available on the [Docker index](https://index.docker.io/): FROM centos:6.4 -Since we’re building a Node.js app, you’ll have to install Node.js as +Since we're building a Node.js app, you'll have to install Node.js as well as npm on your CentOS image. Node.js is required to run your app -and npm to install your app’s dependencies defined in +and npm to install your app's dependencies defined in `package.json`. To install the right package for -CentOS, we’ll use the instructions from the [Node.js -wiki](https://github.com/joyent/node/wiki/Installing-Node.js-via-package-manager#rhelcentosscientific-linux-6): +CentOS, we'll use the instructions from the [Node.js wiki]( +https://github.com/joyent/node/wiki/Installing-Node.js- +via-package-manager#rhelcentosscientific-linux-6): # Enable EPEL for Node.js RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm # Install Node.js and npm RUN yum install -y npm -To bundle your app’s source code inside the Docker image, use the -`ADD` instruction: +To bundle your app's source code inside the Docker image, use the `ADD` +instruction: # Bundle app source ADD . /src -Install your app dependencies using the `npm` -binary: +Install your app dependencies using the `npm` binary: # Install app dependencies RUN cd /src; npm install -Your app binds to port `8080` so you’ll use the -`EXPOSE` instruction to have it mapped by the -`docker` daemon: +Your app binds to port `8080` so you'll use the` EXPOSE` instruction to have +it mapped by the `docker` daemon: EXPOSE 8080 -Last but not least, define the command to run your app using -`CMD` which defines your runtime, i.e. -`node`, and the path to our app, i.e. -`src/index.js` (see the step where we added the -source to the container): +Last but not least, define the command to run your app using `CMD` which +defines your runtime, i.e. `node`, and the path to our app, i.e. `src/index.js` +(see the step where we added the source to the container): CMD ["node", "/src/index.js"] @@ -133,10 +130,9 @@ Your `Dockerfile` should now look like this: ## Building your image -Go to the directory that has your `Dockerfile` and -run the following command to build a Docker image. The `-t` -flag let’s you tag your image so it’s easier to find later -using the `docker images` command: +Go to the directory that has your `Dockerfile` and run the following command +to build a Docker image. The `-t` flag let's you tag your image so it's easier +to find later using the `docker images` command: sudo docker build -t /centos-node-hello . @@ -151,10 +147,9 @@ Your image will now be listed by Docker: ## Run the image -Running your image with `-d` runs the container in -detached mode, leaving the container running in the background. The -`-p` flag redirects a public port to a private port -in the container. Run the image you previously built: +Running your image with `-d` runs the container in detached mode, leaving the +container running in the background. The `-p` flag redirects a public port to +a private port in the container. Run the image you previously built: sudo docker run -p 49160:8080 -d /centos-node-hello @@ -179,11 +174,10 @@ To test your app, get the the port of your app that Docker mapped: > ID IMAGE COMMAND ... PORTS > ecce33b30ebf gasi/centos-node-hello:latest node /src/index.js 49160->8080 -In the example above, Docker mapped the `8080` port -of the container to `49160`. 
+In the example above, Docker mapped the `8080` port of the container to `49160`. -Now you can call your app using `curl` (install if -needed via: `sudo apt-get install curl`): +Now you can call your app using `curl` (install if needed via: +`sudo apt-get install curl`): curl -i localhost:49160 @@ -200,5 +194,4 @@ We hope this tutorial helped you get up and running with Node.js and CentOS on Docker. You can get the full source code at [https://github.com/gasi/docker-node-hello](https://github.com/gasi/docker-node-hello). -Continue to [*Redis -Service*](../running_redis_service/#running-redis-service). +Continue to [*Redis Service*](../running_redis_service/#running-redis-service). diff --git a/docs/sources/examples/postgresql_service.md b/docs/sources/examples/postgresql_service.md index 053bf410c0..93abc06e39 100644 --- a/docs/sources/examples/postgresql_service.md +++ b/docs/sources/examples/postgresql_service.md @@ -9,13 +9,13 @@ page_keywords: docker, example, package installation, postgresql > - This example assumes you have Docker running in daemon mode. For > more information please see [*Check your Docker > install*](../hello_world/#running-examples). -> - **If you don’t like sudo** then see [*Giving non-root +> - **If you don't like sudo** then see [*Giving non-root > access*](../../installation/binaries/#dockergroup) ## Installing PostgreSQL on Docker -Assuming there is no Docker image that suits your needs in [the -index](http://index.docker.io), you can create one yourself. +Assuming there is no Docker image that suits your needs in [the index]( +http://index.docker.io), you can create one yourself. Start by creating a new Dockerfile: @@ -25,7 +25,7 @@ Start by creating a new Dockerfile: > suitably secure. # - # example Dockerfile for http://docs.docker.io/en/latest/examples/postgresql_service/ + # example Dockerfile for http://docs.docker.io/examples/postgresql_service/ # FROM ubuntu @@ -96,8 +96,8 @@ or we can access it from our host (or the network). ### Using container linking -Containers can be linked to another container’s ports directly using -`-link remote_name:local_alias` in the client’s +Containers can be linked to another container's ports directly using +`-link remote_name:local_alias` in the client's `docker run`. This will set a number of environment variables that can then be used to connect: diff --git a/docs/sources/examples/python_web_app.md b/docs/sources/examples/python_web_app.md index 2212f97139..fc454a390a 100644 --- a/docs/sources/examples/python_web_app.md +++ b/docs/sources/examples/python_web_app.md @@ -9,7 +9,7 @@ page_keywords: docker, example, python, web app > - This example assumes you have Docker running in daemon mode. For > more information please see [*Check your Docker > install*](../hello_world/#running-examples). -> - **If you don’t like sudo** then see [*Giving non-root +> - **If you don't like sudo** then see [*Giving non-root > access*](../../installation/binaries/#dockergroup) While using Dockerfiles is the preferred way to create maintainable and @@ -18,13 +18,13 @@ then commit your live changes to an image. The goal of this example is to show you how you can modify your own Docker images by making changes to a running container, and then saving -the results as a new image. We will do that by making a simple ‘hello -world’ Flask web application image. +the results as a new image. We will do that by making a simple `hello +world` Flask web application image. 
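The general pattern in what follows is to run a container interactively, make changes inside it, and then save the result as a new image with `docker commit`. In outline (the image and repository names here are only placeholders):

    sudo docker run -i -t ubuntu bash
    # ...make your changes inside the container, then type exit...
    sudo docker ps -a        # note the id of the container you just exited
    sudo docker commit <container_id> <yourname>/myimage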
## Download the initial image -Download the `shykes/pybuilder` Docker image from -the `http://index.docker.io` registry. +Download the `shykes/pybuilder` Docker image from the `http://index.docker.io` +registry. This image contains a `buildapp` script to download the web app and then `pip install` any required @@ -36,7 +36,7 @@ modules, and a `runapp` script that finds the > **Note**: > This container was built with a very old version of docker (May 2013 - > see [shykes/pybuilder](https://github.com/shykes/pybuilder) ), when the -> `Dockerfile` format was different, but the image can +> Dockerfile format was different, but the image can > still be used now. ## Interactively make some modifications @@ -49,7 +49,7 @@ the `$URL` variable. The container is given a name `pybuilder_run` which we will use in the next steps. While this example is simple, you could run any number of interactive -commands, try things out, and then exit when you’re done. +commands, try things out, and then exit when you're done. $ sudo docker run -i -t -name pybuilder_run shykes/pybuilder bash @@ -76,11 +76,11 @@ mapped to a local port $ sudo docker run -d -p 5000 --name web_worker /builds/github.com/shykes/helloflask/master /usr/local/bin/runapp -- **"docker run -d "** run a command in a new container. We pass "-d" - so it runs as a daemon. -- **"-p 5000"** the web app is going to listen on this port, so it - must be mapped from the container to the host system. -- **/usr/local/bin/runapp** is the command which starts the web app. + - **"docker run -d "** run a command in a new container. We pass "-d" + so it runs as a daemon. + - **"-p 5000"** the web app is going to listen on this port, so it + must be mapped from the container to the host system. + - **/usr/local/bin/runapp** is the command which starts the web app. ## View the container logs @@ -93,7 +93,7 @@ another terminal and continue with the example while watching the result in the logs. $ sudo docker logs -f web_worker - * Running on http://0.0.0.0:5000/ + * Running on http://0.0.0.0:5000/ ## See the webapp output @@ -117,7 +117,7 @@ everything worked as planned you should see the line List `--all` the Docker containers. If this container had already finished running, it will still be listed here -with a status of ‘Exit 0’. +with a status of `Exit 0`. $ sudo docker stop web_worker $ sudo docker rm web_worker pybuilder_run diff --git a/docs/sources/examples/running_redis_service.md b/docs/sources/examples/running_redis_service.md index b67937fab5..2598af2897 100644 --- a/docs/sources/examples/running_redis_service.md +++ b/docs/sources/examples/running_redis_service.md @@ -9,7 +9,7 @@ page_keywords: docker, example, package installation, networking, redis > - This example assumes you have Docker running in daemon mode. For > more information please see [*Check your Docker > install*](../hello_world/#running-examples). -> - **If you don’t like sudo** then see [*Giving non-root +> - **If you don't like sudo** then see [*Giving non-root > access*](../../installation/binaries/#dockergroup) Very simple, no frills, Redis service attached to a web application @@ -33,26 +33,24 @@ Replace `` with your own user name. ## Run the service -Use the image we’ve just created and name your container -`redis`. +Use the image we've just created and name your container `redis`. -Running the service with `-d` runs the container in -detached mode, leaving the container running in the background. 
+Running the service with `-d` runs the container in detached mode, leaving +the container running in the background. -Importantly, we’re not exposing any ports on our container. Instead -we’re going to use a container link to provide access to our Redis +Importantly, we're not exposing any ports on our container. Instead +we're going to use a container link to provide access to our Redis database. sudo docker run --name redis -d /redis ## Create your web application container -Next we can create a container for our application. We’re going to use -the `-link` flag to create a link to the -`redis` container we’ve just created with an alias -of `db`. This will create a secure tunnel to the -`redis` container and expose the Redis instance -running inside that container to only this container. +Next we can create a container for our application. We're going to use +the `-link` flag to create a link to the `redis` container we've just +created with an alias of `db`. This will create a secure tunnel to the +`redis` container and expose the Redis instance running inside that +container to only this container. sudo docker run --link redis:db -i -t ubuntu:12.10 /bin/bash @@ -63,7 +61,7 @@ get the `redis-cli` binary to test our connection. apt-get -y install redis-server service redis-server stop -As we’ve used the `--link redis:db` option, Docker +As we've used the `--link redis:db` option, Docker has created some environment variables in our web application container. env | grep DB_ @@ -76,11 +74,10 @@ has created some environment variables in our web application container. DB_PORT_6379_TCP_ADDR=172.17.0.33 DB_PORT_6379_TCP_PROTO=tcp -We can see that we’ve got a small list of environment variables prefixed -with `DB`. The `DB` comes from -the link alias specified when we launched the container. Let’s use the -`DB_PORT_6379_TCP_ADDR` variable to connect to our -Redis container. +We can see that we've got a small list of environment variables prefixed +with `DB`. The `DB` comes from the link alias specified when we launched +the container. Let's use the `DB_PORT_6379_TCP_ADDR` variable to connect to +our Redis container. redis-cli -h $DB_PORT_6379_TCP_ADDR redis 172.17.0.33:6379> diff --git a/docs/sources/examples/running_riak_service.md b/docs/sources/examples/running_riak_service.md index ad0b20a628..577ae76aa3 100644 --- a/docs/sources/examples/running_riak_service.md +++ b/docs/sources/examples/running_riak_service.md @@ -9,20 +9,20 @@ page_keywords: docker, example, package installation, networking, riak > - This example assumes you have Docker running in daemon mode. For > more information please see [*Check your Docker > install*](../hello_world/#running-examples). -> - **If you don’t like sudo** then see [*Giving non-root +> - **If you don't like sudo** then see [*Giving non-root > access*](../../installation/binaries/#dockergroup) The goal of this example is to show you how to build a Docker image with Riak pre-installed. -## Creating a `Dockerfile` +## Creating a Dockerfile -Create an empty file called `Dockerfile`: +Create an empty file called Dockerfile: touch Dockerfile Next, define the parent image you want to use to build your image on top -of. We’ll use [Ubuntu](https://index.docker.io/_/ubuntu/) (tag: +of. 
We'll use [Ubuntu](https://index.docker.io/_/ubuntu/) (tag: `latest`), which is available on the [docker index](http://index.docker.io): @@ -43,13 +43,13 @@ Next, we update the APT cache and apply any updates: After that, we install and setup a few dependencies: -- `curl` is used to download Basho’s APT + - `curl` is used to download Basho's APT repository key -- `lsb-release` helps us derive the Ubuntu release + - `lsb-release` helps us derive the Ubuntu release codename -- `openssh-server` allows us to login to + - `openssh-server` allows us to login to containers remotely and join Riak nodes to form a cluster -- `supervisor` is used manage the OpenSSH and Riak + - `supervisor` is used manage the OpenSSH and Riak processes @@ -66,7 +66,7 @@ After that, we install and setup a few dependencies: RUN echo 'root:basho' | chpasswd -Next, we add Basho’s APT repository: +Next, we add Basho's APT repository: RUN curl -s http://apt.basho.com/gpg/basho.apt.key | apt-key add -- RUN echo "deb http://apt.basho.com $(lsb_release -cs) main" > /etc/apt/sources.list.d/basho.list @@ -98,10 +98,10 @@ are started: CMD ["/usr/bin/supervisord"] -## Create a `supervisord` configuration file +## Create a supervisord configuration file Create an empty file called `supervisord.conf`. Make -sure it’s at the same directory level as your `Dockerfile`: +sure it's at the same directory level as your Dockerfile: touch supervisord.conf @@ -131,7 +131,7 @@ Now you should be able to build a Docker image for Riak: ## Next steps Riak is a distributed database. Many production deployments consist of -[at least five -nodes](http://basho.com/why-your-riak-cluster-should-have-at-least-five-nodes/). +[at least five nodes]( +http://basho.com/why-your-riak-cluster-should-have-at-least-five-nodes/). See the [docker-riak](https://github.com/hectcastro/docker-riak) project details on how to deploy a Riak cluster using Docker and Pipework. diff --git a/docs/sources/examples/running_ssh_service.md b/docs/sources/examples/running_ssh_service.md index 112b9fa441..20d5c12326 100644 --- a/docs/sources/examples/running_ssh_service.md +++ b/docs/sources/examples/running_ssh_service.md @@ -8,11 +8,11 @@ page_keywords: docker, example, package installation, networking > - This example assumes you have Docker running in daemon mode. For > more information please see [*Check your Docker > install*](../hello_world/#running-examples). -> - **If you don’t like sudo** then see [*Giving non-root +> - **If you don't like sudo** then see [*Giving non-root > access*](../../installation/binaries/#dockergroup) The following Dockerfile sets up an sshd service in a container that you -can use to connect to and inspect other container’s volumes, or to get +can use to connect to and inspect other container's volumes, or to get quick access to a test container. # sshd @@ -38,14 +38,14 @@ Build the image using: $ sudo docker build -rm -t eg_sshd . Then run it. 
You can then use `docker port` to find -out what host port the container’s port 22 is mapped to: +out what host port the container's port 22 is mapped to: $ sudo docker run -d -P -name test_sshd eg_sshd $ sudo docker port test_sshd 22 0.0.0.0:49154 And now you can ssh to port `49154` on the Docker -daemon’s host IP address (`ip address` or +daemon's host IP address (`ip address` or `ifconfig` can tell you that): $ ssh root@192.168.1.2 -p 49154 diff --git a/docs/sources/examples/using_supervisord.md b/docs/sources/examples/using_supervisord.md index 3a0793710f..6faa456080 100644 --- a/docs/sources/examples/using_supervisord.md +++ b/docs/sources/examples/using_supervisord.md @@ -9,25 +9,25 @@ page_keywords: docker, supervisor, process management > - This example assumes you have Docker running in daemon mode. For > more information please see [*Check your Docker > install*](../hello_world/#running-examples). -> - **If you don’t like sudo** then see [*Giving non-root +> - **If you don't like sudo** then see [*Giving non-root > access*](../../installation/binaries/#dockergroup) Traditionally a Docker container runs a single process when it is launched, for example an Apache daemon or a SSH server daemon. Often though you want to run more than one process in a container. There are a number of ways you can achieve this ranging from using a simple Bash -script as the value of your container’s `CMD` +script as the value of your container's `CMD` instruction to installing a process management tool. -In this example we’re going to make use of the process management tool, +In this example we're going to make use of the process management tool, [Supervisor](http://supervisord.org/), to manage multiple processes in our container. Using Supervisor allows us to better control, manage, and -restart the processes we want to run. To demonstrate this we’re going to +restart the processes we want to run. To demonstrate this we're going to install and manage both an SSH daemon and an Apache daemon. ## Creating a Dockerfile -Let’s start by creating a basic `Dockerfile` for our +Let's start by creating a basic `Dockerfile` for our new image. FROM ubuntu:latest @@ -45,20 +45,20 @@ our container. RUN mkdir -p /var/run/sshd RUN mkdir -p /var/log/supervisor -Here we’re installing the `openssh-server`, +Here we're installing the `openssh-server`, `apache2` and `supervisor` -(which provides the Supervisor daemon) packages. We’re also creating two +(which provides the Supervisor daemon) packages. We're also creating two new directories that are needed to run our SSH daemon and Supervisor. -## Adding Supervisor’s configuration file +## Adding Supervisor's configuration file -Now let’s add a configuration file for Supervisor. The default file is +Now let's add a configuration file for Supervisor. The default file is called `supervisord.conf` and is located in `/etc/supervisor/conf.d/`. ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf -Let’s see what is inside our `supervisord.conf` +Let's see what is inside our `supervisord.conf` file. [supervisord] @@ -73,7 +73,7 @@ file. The `supervisord.conf` configuration file contains directives that configure Supervisor and the processes it manages. The first block `[supervisord]` provides configuration -for Supervisor itself. We’re using one directive, `nodaemon` +for Supervisor itself. We're using one directive, `nodaemon` which tells Supervisor to run interactively rather than daemonize. @@ -84,14 +84,14 @@ start each process. 
## Exposing ports and running Supervisor -Now let’s finish our `Dockerfile` by exposing some +Now let's finish our `Dockerfile` by exposing some required ports and specifying the `CMD` instruction to start Supervisor when our container launches. EXPOSE 22 80 CMD ["/usr/bin/supervisord"] -Here we’ve exposed ports 22 and 80 on the container and we’re running +Here We've exposed ports 22 and 80 on the container and we're running the `/usr/bin/supervisord` binary when the container launches. @@ -103,7 +103,7 @@ We can now build our new container. ## Running our Supervisor container -Once we’ve got a built image we can launch a container from it. +Once We've got a built image we can launch a container from it. sudo docker run -p 22 -p 80 -t -i /supervisord 2013-11-25 18:53:22,312 CRIT Supervisor running as root (no user in config file) @@ -113,9 +113,8 @@ Once we’ve got a built image we can launch a container from it. 2013-11-25 18:53:23,349 INFO spawned: 'apache2' with pid 7 . . . -We’ve launched a new container interactively using the -`docker run` command. That container has run -Supervisor and launched the SSH and Apache daemons with it. We’ve -specified the `-p` flag to expose ports 22 and 80. -From here we can now identify the exposed ports and connect to one or -both of the SSH and Apache daemons. +We've launched a new container interactively using the `docker run` command. +That container has run Supervisor and launched the SSH and Apache daemons with +it. We've specified the `-p` flag to expose ports 22 and 80. From here we can +now identify the exposed ports and connect to one or both of the SSH and Apache +daemons. diff --git a/docs/sources/faq.md b/docs/sources/faq.md index 563e07a1c7..2494f33e9c 100644 --- a/docs/sources/faq.md +++ b/docs/sources/faq.md @@ -4,129 +4,126 @@ page_keywords: faq, questions, documentation, docker # FAQ -## Most frequently asked questions. +## Most frequently asked questions ### How much does Docker cost? -> Docker is 100% free, it is open source, so you can use it without -> paying. +Docker is 100% free, it is open source, so you can use it without +paying. ### What open source license are you using? -> We are using the Apache License Version 2.0, see it here: -> [https://github.com/dotcloud/docker/blob/master/LICENSE](https://github.com/dotcloud/docker/blob/master/LICENSE) +We are using the Apache License Version 2.0, see it here: +[https://github.com/dotcloud/docker/blob/master/LICENSE]( +https://github.com/dotcloud/docker/blob/master/LICENSE) ### Does Docker run on Mac OS X or Windows? -> Not at this time, Docker currently only runs on Linux, but you can use -> VirtualBox to run Docker in a virtual machine on your box, and get the -> best of both worlds. Check out the [*Mac OS -> X*](../installation/mac/#macosx) and [*Microsoft -> Windows*](../installation/windows/#windows) installation guides. The -> small Linux distribution boot2docker can be run inside virtual -> machines on these two operating systems. +Not at this time, Docker currently only runs on Linux, but you can use +VirtualBox to run Docker in a virtual machine on your box, and get the +best of both worlds. Check out the [*Mac OS X*](../installation/mac/#macosx) +and [*Microsoft Windows*](../installation/windows/#windows) installation +guides. The small Linux distribution boot2docker can be run inside virtual +machines on these two operating systems. ### How do containers compare to virtual machines? -> They are complementary. VMs are best used to allocate chunks of -> hardware resources. 
Containers operate at the process level, which -> makes them very lightweight and perfect as a unit of software -> delivery. +They are complementary. VMs are best used to allocate chunks of +hardware resources. Containers operate at the process level, which +makes them very lightweight and perfect as a unit of software +delivery. ### What does Docker add to just plain LXC? -> Docker is not a replacement for LXC. "LXC" refers to capabilities of -> the Linux kernel (specifically namespaces and control groups) which -> allow sandboxing processes from one another, and controlling their -> resource allocations. On top of this low-level foundation of kernel -> features, Docker offers a high-level tool with several powerful -> functionalities: -> -> - *Portable deployment across machines.* -> : Docker defines a format for bundling an application and all -> its dependencies into a single object which can be transferred -> to any Docker-enabled machine, and executed there with the -> guarantee that the execution environment exposed to the -> application will be the same. LXC implements process -> sandboxing, which is an important pre-requisite for portable -> deployment, but that alone is not enough for portable -> deployment. If you sent me a copy of your application -> installed in a custom LXC configuration, it would almost -> certainly not run on my machine the way it does on yours, -> because it is tied to your machine’s specific configuration: -> networking, storage, logging, distro, etc. Docker defines an -> abstraction for these machine-specific settings, so that the -> exact same Docker container can run - unchanged - on many -> different machines, with many different configurations. -> -> - *Application-centric.* -> : Docker is optimized for the deployment of applications, as -> opposed to machines. This is reflected in its API, user -> interface, design philosophy and documentation. By contrast, -> the `lxc` helper scripts focus on -> containers as lightweight machines - basically servers that -> boot faster and need less RAM. We think there’s more to -> containers than just that. -> -> - *Automatic build.* -> : Docker includes [*a tool for developers to automatically -> assemble a container from their source -> code*](../reference/builder/#dockerbuilder), with full control -> over application dependencies, build tools, packaging etc. -> They are free to use -> `make, maven, chef, puppet, salt,` Debian -> packages, RPMs, source tarballs, or any combination of the -> above, regardless of the configuration of the machines. -> -> - *Versioning.* -> : Docker includes git-like capabilities for tracking successive -> versions of a container, inspecting the diff between versions, -> committing new versions, rolling back etc. The history also -> includes how a container was assembled and by whom, so you get -> full traceability from the production server all the way back -> to the upstream developer. Docker also implements incremental -> uploads and downloads, similar to `git pull` -> , so new versions of a container can be transferred -> by only sending diffs. -> -> - *Component re-use.* -> : Any container can be used as a [*"base -> image"*](../terms/image/#base-image-def) to create more -> specialized components. This can be done manually or as part -> of an automated build. For example you can prepare the ideal -> Python environment, and use it as a base for 10 different -> applications. Your ideal Postgresql setup can be re-used for -> all your future projects. And so on. 
-> -> - *Sharing.* -> : Docker has access to a [public -> registry](http://index.docker.io) where thousands of people -> have uploaded useful containers: anything from Redis, CouchDB, -> Postgres to IRC bouncers to Rails app servers to Hadoop to -> base images for various Linux distros. The -> [*registry*](../reference/api/registry_index_spec/#registryindexspec) -> also includes an official "standard library" of useful -> containers maintained by the Docker team. The registry itself -> is open-source, so anyone can deploy their own registry to -> store and transfer private containers, for internal server -> deployments for example. -> -> - *Tool ecosystem.* -> : Docker defines an API for automating and customizing the -> creation and deployment of containers. There are a huge number -> of tools integrating with Docker to extend its capabilities. -> PaaS-like deployment (Dokku, Deis, Flynn), multi-node -> orchestration (Maestro, Salt, Mesos, Openstack Nova), -> management dashboards (docker-ui, Openstack Horizon, -> Shipyard), configuration management (Chef, Puppet), continuous -> integration (Jenkins, Strider, Travis), etc. Docker is rapidly -> establishing itself as the standard for container-based -> tooling. -> +Docker is not a replacement for LXC. "LXC" refers to capabilities of +the Linux kernel (specifically namespaces and control groups) which +allow sandboxing processes from one another, and controlling their +resource allocations. On top of this low-level foundation of kernel +features, Docker offers a high-level tool with several powerful +functionalities: + + - *Portable deployment across machines.* + Docker defines a format for bundling an application and all + its dependencies into a single object which can be transferred + to any Docker-enabled machine, and executed there with the + guarantee that the execution environment exposed to the + application will be the same. LXC implements process + sandboxing, which is an important pre-requisite for portable + deployment, but that alone is not enough for portable + deployment. If you sent me a copy of your application + installed in a custom LXC configuration, it would almost + certainly not run on my machine the way it does on yours, + because it is tied to your machine's specific configuration: + networking, storage, logging, distro, etc. Docker defines an + abstraction for these machine-specific settings, so that the + exact same Docker container can run - unchanged - on many + different machines, with many different configurations. + + - *Application-centric.* + Docker is optimized for the deployment of applications, as + opposed to machines. This is reflected in its API, user + interface, design philosophy and documentation. By contrast, + the `lxc` helper scripts focus on + containers as lightweight machines - basically servers that + boot faster and need less RAM. We think there's more to + containers than just that. + + - *Automatic build.* + Docker includes [*a tool for developers to automatically + assemble a container from their source + code*](../reference/builder/#dockerbuilder), with full control + over application dependencies, build tools, packaging etc. + They are free to use `make`, `maven`, `chef`, `puppet`, `salt,` + Debian packages, RPMs, source tarballs, or any combination of the + above, regardless of the configuration of the machines. 
+ + - *Versioning.* + Docker includes git-like capabilities for tracking successive + versions of a container, inspecting the diff between versions, + committing new versions, rolling back etc. The history also + includes how a container was assembled and by whom, so you get + full traceability from the production server all the way back + to the upstream developer. Docker also implements incremental + uploads and downloads, similar to `git pull`, so new versions + of a container can be transferred by only sending diffs. + + - *Component re-use.* + Any container can be used as a [*"base image"*]( + ../terms/image/#base-image-def) to create more specialized components. + This can be done manually or as part of an automated build. For example + you can prepare the ideal Python environment, and use it as a base for + 10 different applications. Your ideal Postgresql setup can be re-used for + all your future projects. And so on. + + - *Sharing.* + Docker has access to a [public registry](http://index.docker.io) where + thousands of people have uploaded useful containers: anything from Redis, + CouchDB, Postgres to IRC bouncers to Rails app servers to Hadoop to + base images for various Linux distros. The + [*registry*](../reference/api/registry_index_spec/#registryindexspec) + also includes an official "standard library" of useful + containers maintained by the Docker team. The registry itself + is open-source, so anyone can deploy their own registry to + store and transfer private containers, for internal server + deployments for example. + + - *Tool ecosystem.* + Docker defines an API for automating and customizing the + creation and deployment of containers. There are a huge number + of tools integrating with Docker to extend its capabilities. + PaaS-like deployment (Dokku, Deis, Flynn), multi-node + orchestration (Maestro, Salt, Mesos, Openstack Nova), + management dashboards (docker-ui, Openstack Horizon, + Shipyard), configuration management (Chef, Puppet), continuous + integration (Jenkins, Strider, Travis), etc. Docker is rapidly + establishing itself as the standard for container-based + tooling. + ### What is different between a Docker container and a VM? -There’s a great StackOverflow answer [showing the -differences](http://stackoverflow.com/questions/16047306/how-is-docker-io-different-from-a-normal-virtual-machine). +There's a great StackOverflow answer [showing the differences]( +http://stackoverflow.com/questions/16047306/ +how-is-docker-io-different-from-a-normal-virtual-machine). ### Do I lose my data when the container exits? @@ -145,74 +142,70 @@ running in parallel. ### How do I connect Docker containers? Currently the recommended way to link containers is via the link -primitive. You can see details of how to [work with links -here](http://docs.docker.io/en/latest/use/working_with_links_names/). +primitive. You can see details of how to [work with links here]( +http://docs.docker.io/use/working_with_links_names/). Also of useful when enabling more flexible service portability is the -[Ambassador linking -pattern](http://docs.docker.io/en/latest/use/ambassador_pattern_linking/). +[Ambassador linking pattern]( +http://docs.docker.io/use/ambassador_pattern_linking/). ### How do I run more than one process in a Docker container? -Any capable process supervisor such as -[http://supervisord.org/](http://supervisord.org/), runit, s6, or -daemontools can do the trick. Docker will start up the process -management daemon which will then fork to run additional processes. 
As -long as the processor manager daemon continues to run, the container -will continue to as well. You can see a more substantial example [that -uses supervisord -here](http://docs.docker.io/en/latest/examples/using_supervisord/). +Any capable process supervisor such as [http://supervisord.org/]( +http://supervisord.org/), runit, s6, or daemontools can do the trick. +Docker will start up the process management daemon which will then fork +to run additional processes. As long as the processor manager daemon continues +to run, the container will continue to as well. You can see a more substantial +example [that uses supervisord here]( +http://docs.docker.io/examples/using_supervisord/). ### What platforms does Docker run on? Linux: -- Ubuntu 12.04, 13.04 et al -- Fedora 19/20+ -- RHEL 6.5+ -- Centos 6+ -- Gentoo -- ArchLinux -- openSUSE 12.3+ -- CRUX 3.0+ + - Ubuntu 12.04, 13.04 et al + - Fedora 19/20+ + - RHEL 6.5+ + - Centos 6+ + - Gentoo + - ArchLinux + - openSUSE 12.3+ + - CRUX 3.0+ Cloud: -- Amazon EC2 -- Google Compute Engine -- Rackspace + - Amazon EC2 + - Google Compute Engine + - Rackspace ### How do I report a security issue with Docker? -You can learn about the project’s security policy -[here](http://www.docker.io/security/) and report security issues to -this [mailbox](mailto:security%40docker.com). +You can learn about the project's security policy +[here](https://www.docker.io/security/) and report security issues to +this [mailbox](mailto:security@docker.com). ### Why do I need to sign my commits to Docker with the DCO? -Please read [our blog -post](http://blog.docker.io/2014/01/docker-code-contributions-require-developer-certificate-of-origin/) +Please read [our blog post]( +http://blog.docker.io/2014/01/ +docker-code-contributions-require-developer-certificate-of-origin/) on the introduction of the DCO. ### Can I help by adding some questions and answers? -Definitely! You can fork [the -repo](http://www.github.com/dotcloud/docker) and edit the documentation -sources. +Definitely! You can fork [the repo](https://github.com/dotcloud/docker) and +edit the documentation sources. ### Where can I find more answers? You can find more answers on: -- [Docker user - mailinglist](https://groups.google.com/d/forum/docker-user) -- [Docker developer - mailinglist](https://groups.google.com/d/forum/docker-dev) +- [Docker user mailinglist](https://groups.google.com/d/forum/docker-user) +- [Docker developer mailinglist](https://groups.google.com/d/forum/docker-dev) - [IRC, docker on freenode](irc://chat.freenode.net#docker) -- [GitHub](http://www.github.com/dotcloud/docker) -- [Ask questions on - Stackoverflow](http://stackoverflow.com/search?q=docker) +- [GitHub](https://github.com/dotcloud/docker) +- [Ask questions on Stackoverflow](http://stackoverflow.com/search?q=docker) - [Join the conversation on Twitter](http://twitter.com/docker) -Looking for something else to read? Checkout the [*Hello -World*](../examples/hello_world/#hello-world) example. +Looking for something else to read? Checkout the [*Hello World*]( +../examples/hello_world/#hello-world) example. diff --git a/docs/sources/index.md b/docs/sources/index.md index 42f3286352..d582321563 100644 --- a/docs/sources/index.md +++ b/docs/sources/index.md @@ -8,7 +8,7 @@ page_keywords: docker, introduction, documentation, about, technology, understan ## Introduction -[**Docker**](http://www.docker.io) is a container based virtualization +[**Docker**](https://www.docker.io) is a container based virtualization framework. 
Unlike traditional virtualization Docker is fast, lightweight and easy to use. Docker allows you to create containers holding all the dependencies for an application. Each container is kept isolated diff --git a/docs/sources/index/accounts.md b/docs/sources/index/accounts.md index 216b0c17ee..c3138b61da 100644 --- a/docs/sources/index/accounts.md +++ b/docs/sources/index/accounts.md @@ -6,23 +6,23 @@ page_keywords: Docker, docker, index, accounts, plans, Dockerfile, Docker.io, do ## Docker IO and Docker Index Accounts -You can `search` for Docker images and `pull` them from the [Docker Index] -(https://index.docker.io) without signing in or even having an account. However, +You can `search` for Docker images and `pull` them from the [Docker Index]( +https://index.docker.io) without signing in or even having an account. However, in order to `push` images, leave comments or to *star* a repository, you are going to need a [Docker IO](https://www.docker.io) account. ### Registration for a Docker IO Account -You can get a Docker IO account by [signing up for one here] -(https://index.docker.io/account/signup/). A valid email address is required to +You can get a Docker IO account by [signing up for one here]( +https://index.docker.io/account/signup/). A valid email address is required to register, which you will need to verify for account activation. ### Email activation process You need to have at least one verified email address to be able to use your Docker IO account. If you can't find the validation email, you can request -another by visiting the [Resend Email Confirmation] -(https://index.docker.io/account/resend-email-confirmation/) page. +another by visiting the [Resend Email Confirmation]( +https://index.docker.io/account/resend-email-confirmation/) page. ### Password reset process diff --git a/docs/sources/installation.md b/docs/sources/installation.md index 0ee7b2f903..66b28b2b3c 100644 --- a/docs/sources/installation.md +++ b/docs/sources/installation.md @@ -9,17 +9,17 @@ techniques for installing Docker all the time. ## Contents: -- [Ubuntu](ubuntulinux/) -- [Red Hat Enterprise Linux](rhel/) -- [Fedora](fedora/) -- [Arch Linux](archlinux/) -- [CRUX Linux](cruxlinux/) -- [Gentoo](gentoolinux/) -- [openSUSE](openSUSE/) -- [FrugalWare](frugalware/) -- [Mac OS X](mac/) -- [Windows](windows/) -- [Amazon EC2](amazon/) -- [Rackspace Cloud](rackspace/) -- [Google Cloud Platform](google/) -- [Binaries](binaries/) \ No newline at end of file + - [Ubuntu](ubuntulinux/) + - [Red Hat Enterprise Linux](rhel/) + - [Fedora](fedora/) + - [Arch Linux](archlinux/) + - [CRUX Linux](cruxlinux/) + - [Gentoo](gentoolinux/) + - [openSUSE](openSUSE/) + - [FrugalWare](frugalware/) + - [Mac OS X](mac/) + - [Windows](windows/) + - [Amazon EC2](amazon/) + - [Rackspace Cloud](rackspace/) + - [Google Cloud Platform](google/) + - [Binaries](binaries/) \ No newline at end of file diff --git a/docs/sources/installation/amazon.md b/docs/sources/installation/amazon.md index f97c8fde9e..06ee65a772 100644 --- a/docs/sources/installation/amazon.md +++ b/docs/sources/installation/amazon.md @@ -5,47 +5,47 @@ page_keywords: amazon ec2, virtualization, cloud, docker, documentation, install # Amazon EC2 > **Note**: -> Docker is still under heavy development! We don’t recommend using it in -> production yet, but we’re getting closer with each release. Please see +> Docker is still under heavy development! We don't recommend using it in +> production yet, but we're getting closer with each release. 
Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) There are several ways to install Docker on AWS EC2: -- [*Amazon QuickStart (Release Candidate - March - 2014)*](#amazon-quickstart-release-candidate-march-2014) or -- [*Amazon QuickStart*](#amazon-quickstart) or -- [*Standard Ubuntu Installation*](#standard-ubuntu-installation) + - [*Amazon QuickStart (Release Candidate - March 2014)*]( + #amazon-quickstart-release-candidate-march-2014) or + - [*Amazon QuickStart*](#amazon-quickstart) or + - [*Standard Ubuntu Installation*](#standard-ubuntu-installation) -**You’ll need an** [AWS account](http://aws.amazon.com/) **first, of +**You'll need an** [AWS account](http://aws.amazon.com/) **first, of course.** ## Amazon QuickStart -1. **Choose an image:** - - Launch the [Create Instance - Wizard](https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:) - menu on your AWS Console. - - Click the `Select` button for a 64Bit Ubuntu - image. For example: Ubuntu Server 12.04.3 LTS - - For testing you can use the default (possibly free) - `t1.micro` instance (more info on - [pricing](http://aws.amazon.com/en/ec2/pricing/)). - - Click the `Next: Configure Instance Details` - button at the bottom right. +1. **Choose an image:** + - Launch the [Create Instance + Wizard](https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:) + menu on your AWS Console. + - Click the `Select` button for a 64Bit Ubuntu + image. For example: Ubuntu Server 12.04.3 LTS + - For testing you can use the default (possibly free) + `t1.micro` instance (more info on + [pricing](http://aws.amazon.com/ec2/pricing/)). + - Click the `Next: Configure Instance Details` + button at the bottom right. -2. **Tell CloudInit to install Docker:** - - When you’re on the "Configure Instance Details" step, expand the - "Advanced Details" section. - - Under "User data", select "As text". - - Enter `#include https://get.docker.io` into - the instance *User Data*. - [CloudInit](https://help.ubuntu.com/community/CloudInit) is part - of the Ubuntu image you chose; it will bootstrap Docker by - running the shell script located at this URL. +2. **Tell CloudInit to install Docker:** + - When you're on the "Configure Instance Details" step, expand the + "Advanced Details" section. + - Under "User data", select "As text". + - Enter `#include https://get.docker.io` into + the instance *User Data*. + [CloudInit](https://help.ubuntu.com/community/CloudInit) is part + of the Ubuntu image you chose; it will bootstrap Docker by + running the shell script located at this URL. -3. After a few more standard choices where defaults are probably ok, - your AWS Ubuntu instance with Docker should be running! +3. After a few more standard choices where defaults are probably ok, + your AWS Ubuntu instance with Docker should be running! **If this is your first AWS instance, you may need to set up your Security Group to allow SSH.** By default all incoming ports to your new @@ -55,39 +55,39 @@ get timeouts when you try to connect. Installing with `get.docker.io` (as above) will create a service named `lxc-docker`. It will also set up a [*docker group*](../binaries/#dockergroup) and you may want to -add the *ubuntu* user to it so that you don’t have to use +add the *ubuntu* user to it so that you don't have to use `sudo` for every Docker command. 
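The paragraph above suggests adding the *ubuntu* user to the *docker* group so that `sudo` is not needed for every Docker command. A minimal sketch of what that might look like from an SSH session on the instance, assuming the installer created a group literally named `docker` as described above; group membership only takes effect for new login sessions:

    # add the default ubuntu user to the docker group created by the installer
    sudo usermod -aG docker ubuntu
    # log out and SSH back in, then confirm the client works without sudo
    docker version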
-Once you’ve got Docker installed, you’re ready to try it out – head on +Once you`ve got Docker installed, you're ready to try it out – head on over to the [*First steps with Docker*](../../use/basics/) or [*Examples*](../../examples/) section. ## Amazon QuickStart (Release Candidate - March 2014) Amazon just published new Docker-ready AMIs (2014.03 Release Candidate). -Docker packages can now be installed from Amazon’s provided Software +Docker packages can now be installed from Amazon's provided Software Repository. -1. **Choose an image:** - - Launch the [Create Instance - Wizard](https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:) - menu on your AWS Console. - - Click the `Community AMI` menu option on the - left side - - Search for ‘2014.03’ and select one of the Amazon provided AMI, - for example `amzn-ami-pv-2014.03.rc-0.x86_64-ebs` - - For testing you can use the default (possibly free) - `t1.micro` instance (more info on - [pricing](http://aws.amazon.com/en/ec2/pricing/)). - - Click the `Next: Configure Instance Details` - button at the bottom right. +1. **Choose an image:** + - Launch the [Create Instance + Wizard](https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:) + menu on your AWS Console. + - Click the `Community AMI` menu option on the + left side + - Search for `2014.03` and select one of the Amazon provided AMI, + for example `amzn-ami-pv-2014.03.rc-0.x86_64-ebs` + - For testing you can use the default (possibly free) + `t1.micro` instance (more info on + [pricing](http://aws.amazon.com/ec2/pricing/)). + - Click the `Next: Configure Instance Details` + button at the bottom right. -2. After a few more standard choices where defaults are probably ok, - your Amazon Linux instance should be running! -3. SSH to your instance to install Docker : - `ssh -i ec2-user@` +2. After a few more standard choices where defaults are probably ok, + your Amazon Linux instance should be running! +3. SSH to your instance to install Docker : + `ssh -i ec2-user@` -4. Once connected to the instance, type +4. Once connected to the instance, type `sudo yum install -y docker ; sudo service docker start` to install and start Docker @@ -100,5 +100,4 @@ QuickStart*](#amazon-quickstart) to pick an image (or use one of your own) and skip the step with the *User Data*. Then continue with the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) instructions. -Continue with the [*Hello -World*](../../examples/hello_world/#hello-world) example. +Continue with the [*Hello World*](../../examples/hello_world/#hello-world) example. diff --git a/docs/sources/installation/archlinux.md b/docs/sources/installation/archlinux.md index 3eebdecdc8..6e970b96f6 100644 --- a/docs/sources/installation/archlinux.md +++ b/docs/sources/installation/archlinux.md @@ -5,24 +5,24 @@ page_keywords: arch linux, virtualization, docker, documentation, installation # Arch Linux > **Note**: -> Docker is still under heavy development! We don’t recommend using it in -> production yet, but we’re getting closer with each release. Please see +> Docker is still under heavy development! We don't recommend using it in +> production yet, but we're getting closer with each release. Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) > **Note**: -> This is a community contributed installation path. The only ‘official’ +> This is a community contributed installation path. 
The only `official` > installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) > installation path. This version may be out of date because it depends on > some binaries to be updated and published Installing on Arch Linux can be handled via the package in community: -- [docker](https://www.archlinux.org/packages/community/x86_64/docker/) + - [docker](https://www.archlinux.org/packages/community/x86_64/docker/) or the following AUR package: -- [docker-git](https://aur.archlinux.org/packages/docker-git/) + - [docker-git](https://aur.archlinux.org/packages/docker-git/) The docker package will install the latest tagged version of docker. The docker-git package will build from the current master branch. @@ -32,11 +32,11 @@ docker-git package will build from the current master branch. Docker depends on several packages which are specified as dependencies in the packages. The core dependencies are: -- bridge-utils -- device-mapper -- iproute2 -- lxc -- sqlite + - bridge-utils + - device-mapper + - iproute2 + - lxc + - sqlite ## Installation diff --git a/docs/sources/installation/binaries.md b/docs/sources/installation/binaries.md index b62e2d071b..f33508c9d7 100644 --- a/docs/sources/installation/binaries.md +++ b/docs/sources/installation/binaries.md @@ -5,8 +5,8 @@ page_keywords: binaries, installation, docker, documentation, linux # Binaries > **Note**: -> Docker is still under heavy development! We don’t recommend using it in -> production yet, but we’re getting closer with each release. Please see +> Docker is still under heavy development! We don't recommend using it in +> production yet, but we're getting closer with each release. Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) @@ -22,16 +22,16 @@ packages for many distributions, and more keep showing up all the time! To run properly, docker needs the following software to be installed at runtime: -- iptables version 1.4 or later -- Git version 1.7 or later -- procps (or similar provider of a "ps" executable) -- XZ Utils 4.9 or later -- a [properly - mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) - cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount - point [is](https://github.com/dotcloud/docker/issues/2683) - [not](https://github.com/dotcloud/docker/issues/3485) - [sufficient](https://github.com/dotcloud/docker/issues/4568)) + - iptables version 1.4 or later + - Git version 1.7 or later + - procps (or similar provider of a "ps" executable) + - XZ Utils 4.9 or later + - a [properly mounted]( + https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) + cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount + point [is](https://github.com/dotcloud/docker/issues/2683) + [not](https://github.com/dotcloud/docker/issues/3485) + [sufficient](https://github.com/dotcloud/docker/issues/4568)) ## Check kernel dependencies @@ -52,7 +52,7 @@ Linux kernel (it even builds on OSX!). > **Note**: > If you have trouble downloading the binary, you can also get the smaller > compressed release file: -> [https://get.docker.io/builds/Linux/x86\_64/docker-latest.tgz]( +> [https://get.docker.io/builds/Linux/x86_64/docker-latest.tgz]( > https://get.docker.io/builds/Linux/x86_64/docker-latest.tgz) ## Run the docker daemon @@ -74,7 +74,7 @@ Unix group called *docker* and add users to it, then the socket read/writable by the *docker* group when the daemon starts. 
The `docker` daemon must always run as the root user, but if you run the `docker` client as a user in the -*docker* group then you don’t need to add `sudo` to +*docker* group then you don't need to add `sudo` to all the client commands. > **Warning**: @@ -99,5 +99,4 @@ Then follow the regular installation steps. # run a container and open an interactive shell in the container sudo ./docker run -i -t ubuntu /bin/bash -Continue with the [*Hello -World*](../../examples/hello_world/#hello-world) example. +Continue with the [*Hello World*](../../examples/hello_world/#hello-world) example. diff --git a/docs/sources/installation/cruxlinux.md b/docs/sources/installation/cruxlinux.md index 9bb336a6f5..f37d720389 100644 --- a/docs/sources/installation/cruxlinux.md +++ b/docs/sources/installation/cruxlinux.md @@ -5,13 +5,13 @@ page_keywords: crux linux, virtualization, Docker, documentation, installation # CRUX Linux > **Note**: -> Docker is still under heavy development! We don’t recommend using it in -> production yet, but we’re getting closer with each release. Please see +> Docker is still under heavy development! We don't recommend using it in +> production yet, but we're getting closer with each release. Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) > **Note**: -> This is a community contributed installation path. The only ‘official’ +> This is a community contributed installation path. The only `official` > installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) > installation path. This version may be out of date because it depends on > some binaries to be updated and published. @@ -19,9 +19,9 @@ page_keywords: crux linux, virtualization, Docker, documentation, installation Installing on CRUX Linux can be handled via the ports from [James Mills](http://prologic.shortcircuit.net.au/): -- [docker](https://bitbucket.org/prologic/ports/src/tip/docker/) -- [docker-bin](https://bitbucket.org/prologic/ports/src/tip/docker-bin/) -- [docker-git](https://bitbucket.org/prologic/ports/src/tip/docker-git/) +- [docker](https://bitbucket.org/prologic/ports/src/tip/docker/) +- [docker-bin](https://bitbucket.org/prologic/ports/src/tip/docker-bin/) +- [docker-git](https://bitbucket.org/prologic/ports/src/tip/docker-git/) The `docker` port will install the latest tagged version of Docker. The `docker-bin` port will @@ -33,7 +33,7 @@ master branch. For the time being (*until the CRUX Docker port(s) get into the official contrib repository*) you will need to install [James -Mills’](https://bitbucket.org/prologic/ports) ports repository. You can +Mills`](https://bitbucket.org/prologic/ports) ports repository. You can do so via: Download the `httpup` file to @@ -87,7 +87,5 @@ There is a rc script created for Docker. To start the Docker service: To start on system boot: -- Edit `/etc/rc.conf` -- Put `docker` into the `SERVICES=(...)` - array after `net`. - + - Edit `/etc/rc.conf` + - Put `docker` into the `SERVICES=(...)` array after `net`. diff --git a/docs/sources/installation/fedora.md b/docs/sources/installation/fedora.md index 0718df032c..bd82674b01 100644 --- a/docs/sources/installation/fedora.md +++ b/docs/sources/installation/fedora.md @@ -5,13 +5,13 @@ page_keywords: Docker, Docker documentation, Fedora, requirements, virtualbox, v # Fedora > **Note**: -> Docker is still under heavy development! We don’t recommend using it in -> production yet, but we’re getting closer with each release. 
Please see +> Docker is still under heavy development! We don't recommend using it in +> production yet, but we're getting closer with each release. Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) > **Note**: -> This is a community contributed installation path. The only ‘official’ +> This is a community contributed installation path. The only `official` > installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) > installation path. This version may be out of date because it depends on > some binaries to be updated and published. @@ -25,7 +25,7 @@ bit** architecture. The `docker-io` package provides Docker on Fedora. If you have the (unrelated) `docker` package installed already, it will -conflict with `docker-io`. There’s a [bug +conflict with `docker-io`. There's a [bug report](https://bugzilla.redhat.com/show_bug.cgi?id=1043676) filed for it. To proceed with `docker-io` installation on Fedora 19, please remove `docker` first. @@ -48,7 +48,7 @@ To update the `docker-io` package: sudo yum -y update docker-io -Now that it’s installed, let’s start the Docker daemon. +Now that it's installed, let's start the Docker daemon. sudo systemctl start docker @@ -56,7 +56,7 @@ If we want Docker to start at boot, we should also: sudo systemctl enable docker -Now let’s verify that Docker is working. +Now let's verify that Docker is working. sudo docker run -i -t fedora /bin/bash diff --git a/docs/sources/installation/frugalware.md b/docs/sources/installation/frugalware.md index 0e9f9c9f1b..1d640cf8fd 100644 --- a/docs/sources/installation/frugalware.md +++ b/docs/sources/installation/frugalware.md @@ -5,21 +5,21 @@ page_keywords: frugalware linux, virtualization, docker, documentation, installa # FrugalWare > **Note**: -> Docker is still under heavy development! We don’t recommend using it in -> production yet, but we’re getting closer with each release. Please see +> Docker is still under heavy development! We don't recommend using it in +> production yet, but we're getting closer with each release. Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) > **Note**: -> This is a community contributed installation path. The only ‘official’ +> This is a community contributed installation path. The only `official` > installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) > installation path. This version may be out of date because it depends on > some binaries to be updated and published Installing on FrugalWare is handled via the official packages: -- [lxc-docker i686](http://www.frugalware.org/packages/200141) -- [lxc-docker x86\_64](http://www.frugalware.org/packages/200130) + - [lxc-docker i686](http://www.frugalware.org/packages/200141) + - [lxc-docker x86_64](http://www.frugalware.org/packages/200130) The lxc-docker package will install the latest tagged version of Docker. @@ -28,13 +28,13 @@ The lxc-docker package will install the latest tagged version of Docker. Docker depends on several packages which are specified as dependencies in the packages. 
The core dependencies are: -- systemd -- lvm2 -- sqlite3 -- libguestfs -- lxc -- iproute2 -- bridge-utils + - systemd + - lvm2 + - sqlite3 + - libguestfs + - lxc + - iproute2 + - bridge-utils ## Installation diff --git a/docs/sources/installation/gentoolinux.md b/docs/sources/installation/gentoolinux.md index 87e1c78e84..49700ea563 100644 --- a/docs/sources/installation/gentoolinux.md +++ b/docs/sources/installation/gentoolinux.md @@ -5,23 +5,23 @@ page_keywords: gentoo linux, virtualization, docker, documentation, installation # Gentoo > **Note**: -> Docker is still under heavy development! We don’t recommend using it in -> production yet, but we’re getting closer with each release. Please see +> Docker is still under heavy development! We don't recommend using it in +> production yet, but we're getting closer with each release. Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) > **Note**: -> This is a community contributed installation path. The only ‘official’ +> This is a community contributed installation path. The only `official` > installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) > installation path. This version may be out of date because it depends on > some binaries to be updated and published Installing Docker on Gentoo Linux can be accomplished using one of two -methods. The first and best way if you’re looking for a stable +methods. The first and best way if you're looking for a stable experience is to use the official app-emulation/docker package directly in the portage tree. -If you’re looking for a `-bin` ebuild, a live +If you're looking for a `-bin` ebuild, a live ebuild, or bleeding edge ebuild changes/fixes, the second installation method is to use the overlay provided at [https://github.com/tianon/docker-overlay](https://github.com/tianon/docker-overlay) @@ -31,8 +31,8 @@ using the overlay can be found in [the overlay README](https://github.com/tianon/docker-overlay/blob/master/README.md#using-this-overlay). Note that sometimes there is a disparity between the latest version and -what’s in the overlay, and between the latest version in the overlay and -what’s in the portage tree. Please be patient, and the latest version +what's in the overlay, and between the latest version in the overlay and +what's in the portage tree. Please be patient, and the latest version should propagate shortly. ## Installation @@ -47,15 +47,15 @@ since that is the simplest installation path. If any issues arise from this ebuild or the resulting binary, including and especially missing kernel configuration flags and/or dependencies, -[open an issue on the docker-overlay -repository](https://github.com/tianon/docker-overlay/issues) or ping -tianon directly in the \#docker IRC channel on the freenode network. +[open an issue on the docker-overlay repository]( +https://github.com/tianon/docker-overlay/issues) or ping +tianon directly in the #docker IRC channel on the freenode network. ## Starting Docker Ensure that you are running a kernel that includes all the necessary modules and/or configuration for LXC (and optionally for device-mapper -and/or AUFS, depending on the storage driver you’ve decided to use). +and/or AUFS, depending on the storage driver you`ve decided to use). 
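One rough way to check this on Gentoo (a sketch, assuming your kernel was built with `CONFIG_IKCONFIG_PROC` so its configuration is exposed at `/proc/config.gz`; the option names below are illustrative, not an exhaustive list):

    # look for namespace, cgroup, device-mapper and AUFS support in the running kernel
    zgrep -E 'CONFIG_(NAMESPACES|CGROUPS|BLK_DEV_DM|AUFS_FS)=' /proc/config.gz

If an option is missing, or only built as a module that is not loaded, load the module or rebuild the kernel before starting the daemon.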
### OpenRC diff --git a/docs/sources/installation/google.md b/docs/sources/installation/google.md index 611e9bb7bc..bec7d0ba13 100644 --- a/docs/sources/installation/google.md +++ b/docs/sources/installation/google.md @@ -2,22 +2,22 @@ page_title: Installation on Google Cloud Platform page_description: Please note this project is currently under heavy development. It should not be used in production. page_keywords: Docker, Docker documentation, installation, google, Google Compute Engine, Google Cloud Platform -# [Google Cloud Platform](https://cloud.google.com/) +# Google Cloud Platform > **Note**: -> Docker is still under heavy development! We don’t recommend using it in -> production yet, but we’re getting closer with each release. Please see +> Docker is still under heavy development! We don't recommend using it in +> production yet, but we're getting closer with each release. Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) -## [Compute Engine](https://developers.google.com/compute) QuickStart for [Debian](https://www.debian.org) +## Compute Engine QuickStart for Debian -1. Go to [Google Cloud Console](https://cloud.google.com/console) and - create a new Cloud Project with [Compute Engine - enabled](https://developers.google.com/compute/docs/signup). -2. Download and configure the [Google Cloud - SDK](https://developers.google.com/cloud/sdk/) to use your project - with the following commands: +1. Go to [Google Cloud Console](https://cloud.google.com/console) and + create a new Cloud Project with [Compute Engine + enabled](https://developers.google.com/compute/docs/signup). +2. Download and configure the [Google Cloud SDK]( + https://developers.google.com/cloud/sdk/) to use your project + with the following commands: diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md index 4b70ef8371..71fd9f5fed 100644 --- a/docs/sources/installation/mac.md +++ b/docs/sources/installation/mac.md @@ -9,8 +9,8 @@ page_keywords: Docker, Docker documentation, requirements, virtualbox, ssh, linu > 0.8). However, they are subject to change. > **Note**: -> Docker is still under heavy development! We don’t recommend using it in -> production yet, but we’re getting closer with each release. Please see +> Docker is still under heavy development! We don't recommend using it in +> production yet, but we're getting closer with each release. Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) @@ -87,7 +87,7 @@ Run the following commands to get it downloaded and set up: sudo mkdir -p /usr/local/bin sudo cp docker /usr/local/bin/ -And that’s it! Let’s check out how to use it. +And that's it! Let's check out how to use it. ## How To Use Docker On Mac OS X @@ -124,7 +124,7 @@ application. ### Forwarding VM Port Range to Host If we take the port range that docker uses by default with the -P option -(49000-49900), and forward same range from host to vm, we’ll be able to +(49000-49900), and forward same range from host to vm, we'll be able to interact with our containers as if they were running locally: # vm must be powered off @@ -159,7 +159,7 @@ See the GitHub page for ### Upgrading to a newer release of boot2docker To upgrade an initialised VM, you can use the following 3 commands. 
Your -persistence disk will not be changed, so you won’t lose your images and +persistence disk will not be changed, so you won't lose your images and containers: ./boot2docker stop @@ -168,12 +168,11 @@ containers: ### About the way Docker works on Mac OS X: -Docker has two key components: the `docker` daemon -and the `docker` client. The tool works by client -commanding the daemon. In order to work and do its magic, the daemon -makes use of some Linux Kernel features (e.g. LXC, name spaces etc.), -which are not supported by OS X. Therefore, the solution of getting -Docker to run on OS X consists of running it inside a lightweight +Docker has two key components: the `docker` daemon and the `docker` client. +The tool works by client commanding the daemon. In order to work and do its +magic, the daemon makes use of some Linux Kernel features (e.g. LXC, name +spaces etc.), which are not supported by OS X. Therefore, the solution of +getting Docker to run on OS X consists of running it inside a lightweight virtual machine. In order to simplify things, Docker comes with a bash script to make this whole process as easy as possible (i.e. boot2docker). diff --git a/docs/sources/installation/openSUSE.md b/docs/sources/installation/openSUSE.md index ebd8ea6f6e..b4fa9183a5 100644 --- a/docs/sources/installation/openSUSE.md +++ b/docs/sources/installation/openSUSE.md @@ -5,13 +5,13 @@ page_keywords: openSUSE, virtualbox, docker, documentation, installation # openSUSE > **Note**: -> Docker is still under heavy development! We don’t recommend using it in -> production yet, but we’re getting closer with each release. Please see +> Docker is still under heavy development! We don't recommend using it in +> production yet, but we're getting closer with each release. Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) > **Note**: -> This is a community contributed installation path. The only ‘official’ +> This is a community contributed installation path. The only `official` > installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) > installation path. This version may be out of date because it depends on > some binaries to be updated and published @@ -39,13 +39,13 @@ Install the Docker package. sudo zypper in docker -It’s also possible to install Docker using openSUSE’s 1-click install. +It's also possible to install Docker using openSUSE's1-click install. Just visit [this](http://software.opensuse.org/package/docker) page, select your openSUSE version and click on the installation link. This will add the right repository to your system and it will also install the docker package. -Now that it’s installed, let’s start the Docker daemon. +Now that it's installed, let's start the Docker daemon. sudo systemctl start docker @@ -59,5 +59,6 @@ Docker daemon. sudo usermod -G docker -**Done!**, now continue with the [*Hello -World*](../../examples/hello_world/#hello-world) example. +**Done!** +Now continue with the [*Hello World*]( +../../examples/hello_world/#hello-world) example. diff --git a/docs/sources/installation/rackspace.md b/docs/sources/installation/rackspace.md index 2d213a7fc9..8cce292b79 100644 --- a/docs/sources/installation/rackspace.md +++ b/docs/sources/installation/rackspace.md @@ -5,7 +5,7 @@ page_keywords: Rackspace Cloud, installation, docker, linux, ubuntu # Rackspace Cloud > **Note**: -> This is a community contributed installation path. The only ‘official’ +> This is a community contributed installation path. 
The only `official` > installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) > installation path. This version may be out of date because it depends on > some binaries to be updated and published @@ -20,8 +20,8 @@ If you are using any Linux not already shipping with the 3.8 kernel you will need to install it. And this is a little more difficult on Rackspace. -Rackspace boots their servers using grub’s `menu.lst` -and does not like non ‘virtual’ packages (e.g. Xen compatible) +Rackspace boots their servers using grub's `menu.lst` +and does not like non `virtual` packages (e.g. Xen compatible) kernels there, although they do work. This results in `update-grub` not having the expected result, and you will need to set the kernel manually. diff --git a/docs/sources/installation/rhel.md b/docs/sources/installation/rhel.md index d7df63920d..715cca74a2 100644 --- a/docs/sources/installation/rhel.md +++ b/docs/sources/installation/rhel.md @@ -5,20 +5,20 @@ page_keywords: Docker, Docker documentation, requirements, linux, rhel, centos # Red Hat Enterprise Linux > **Note**: -> Docker is still under heavy development! We don’t recommend using it in -> production yet, but we’re getting closer with each release. Please see +> Docker is still under heavy development! We don't recommend using it in +> production yet, but we're getting closer with each release. Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) > **Note**: -> This is a community contributed installation path. The only ‘official’ +> This is a community contributed installation path. The only `official` > installation is using the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) > installation path. This version may be out of date because it depends on > some binaries to be updated and published Docker is available for **RHEL** on EPEL. These instructions should work for both RHEL and CentOS. They will likely work for other binary -compatible EL6 distributions as well, but they haven’t been tested. +compatible EL6 distributions as well, but they haven't been tested. Please note that this package is part of [Extra Packages for Enterprise Linux (EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort @@ -42,12 +42,11 @@ The `docker-io` package provides Docker on EPEL. If you already have the (unrelated) `docker` package installed, it will conflict with `docker-io`. -There’s a [bug -report](https://bugzilla.redhat.com/show_bug.cgi?id=1043676) filed for -it. To proceed with `docker-io` installation, please -remove `docker` first. +There's a [bug report]( +https://bugzilla.redhat.com/show_bug.cgi?id=1043676) filed for it. +To proceed with `docker-io` installation, please remove `docker` first. -Next, let’s install the `docker-io` package which +Next, let's install the `docker-io` package which will install Docker on our host. sudo yum -y install docker-io @@ -56,7 +55,7 @@ To update the `docker-io` package sudo yum -y update docker-io -Now that it’s installed, let’s start the Docker daemon. +Now that it's installed, let's start the Docker daemon. sudo service docker start @@ -64,15 +63,15 @@ If we want Docker to start at boot, we should also: sudo chkconfig docker on -Now let’s verify that Docker is working. +Now let's verify that Docker is working. sudo docker run -i -t fedora /bin/bash -**Done!**, now continue with the [*Hello -World*](../../examples/hello_world/#hello-world) example. 
+**Done!** +Now continue with the [*Hello World*](../../examples/hello_world/#hello-world) example. ## Issues? -If you have any issues - please report them directly in the [Red Hat -Bugzilla for docker-io -component](https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora%20EPEL&component=docker-io). +If you have any issues - please report them directly in the +[Red Hat Bugzilla for docker-io component]( +https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora%20EPEL&component=docker-io). diff --git a/docs/sources/installation/softlayer.md b/docs/sources/installation/softlayer.md index 0b14ac567d..6468829594 100644 --- a/docs/sources/installation/softlayer.md +++ b/docs/sources/installation/softlayer.md @@ -5,32 +5,32 @@ page_keywords: IBM SoftLayer, virtualization, cloud, docker, documentation, inst # IBM SoftLayer > **Note**: -> Docker is still under heavy development! We don’t recommend using it in -> production yet, but we’re getting closer with each release. Please see +> Docker is still under heavy development! We don't recommend using it in +> production yet, but we're getting closer with each release. Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) ## IBM SoftLayer QuickStart -1. Create an [IBM SoftLayer - account](https://www.softlayer.com/cloudlayer/). -2. Log in to the [SoftLayer - Console](https://control.softlayer.com/devices/). -3. Go to [Order Hourly Computing Instance - Wizard](https://manage.softlayer.com/Sales/orderHourlyComputingInstance) - on your SoftLayer Console. -4. Create a new *CloudLayer Computing Instance* (CCI) using the default - values for all the fields and choose: +1. Create an [IBM SoftLayer account]( + https://www.softlayer.com/cloud-servers/). +2. Log in to the [SoftLayer Console]( + https://control.softlayer.com/devices/). +3. Go to [Order Hourly Computing Instance Wizard]( + https://manage.softlayer.com/Sales/orderHourlyComputingInstance) + on your SoftLayer Console. +4. Create a new *CloudLayer Computing Instance* (CCI) using the default + values for all the fields and choose: -- *First Available* as `Datacenter` and -- *Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit)* - as `Operating System`. + - *First Available* as `Datacenter` and + - *Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit)* + as `Operating System`. -5. Click the *Continue Your Order* button at the bottom right and - select *Go to checkout*. -6. Insert the required *User Metadata* and place the order. -7. Then continue with the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) - instructions. +5. Click the *Continue Your Order* button at the bottom right and + select *Go to checkout*. +6. Insert the required *User Metadata* and place the order. +7. Then continue with the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) + instructions. -Continue with the [*Hello -World*](../../examples/hello_world/#hello-world) example. +Continue with the [*Hello World*]( +../../examples/hello_world/#hello-world) example. diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md index 07d6072b5d..c4152ec1c4 100644 --- a/docs/sources/installation/ubuntulinux.md +++ b/docs/sources/installation/ubuntulinux.md @@ -9,16 +9,16 @@ page_keywords: Docker, Docker documentation, requirements, virtualbox, vagrant, > earlier version, you will need to follow them again. > **Note**: -> Docker is still under heavy development! 
We don’t recommend using it in -> production yet, but we’re getting closer with each release. Please see +> Docker is still under heavy development! We don't recommend using it in +> production yet, but we're getting closer with each release. Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) Docker is supported on the following versions of Ubuntu: -- [*Ubuntu Precise 12.04 (LTS) (64-bit)*](#ubuntu-precise-1204-lts-64-bit) -- [*Ubuntu Raring 13.04 and Saucy 13.10 (64 - bit)*](#ubuntu-raring-1304-and-saucy-1310-64-bit) + - [*Ubuntu Precise 12.04 (LTS) (64-bit)*](#ubuntu-precise-1204-lts-64-bit) + - [*Ubuntu Raring 13.04 and Saucy 13.10 (64 + bit)*](#ubuntu-raring-1304-and-saucy-1310-64-bit) Please read [*Docker and UFW*](#docker-and-ufw), if you plan to use [UFW (Uncomplicated Firewall)](https://help.ubuntu.com/community/UFW) @@ -32,12 +32,12 @@ This installation path should work at all times. **Linux kernel 3.8** Due to a bug in LXC, Docker works best on the 3.8 kernel. Precise comes -with a 3.2 kernel, so we need to upgrade it. The kernel you’ll install +with a 3.2 kernel, so we need to upgrade it. The kernel you'll install when following these steps comes with AUFS built in. We also include the generic headers to enable packages that depend on them, like ZFS and the -VirtualBox guest additions. If you didn’t install the headers for your +VirtualBox guest additions. If you didn't install the headers for your "precise" kernel, then you can skip these headers for the "raring" -kernel. But it is safer to include them if you’re not sure. +kernel. But it is safer to include them if you're not sure. # install the backported kernel sudo apt-get update @@ -59,7 +59,7 @@ faster for you to install. First, check that your APT system can deal with `https` URLs: the file `/usr/lib/apt/methods/https` -should exist. If it doesn’t, you need to install the package +should exist. If it doesn't, you need to install the package `apt-transport-https`. [ -e /usr/lib/apt/methods/https ] || { @@ -74,7 +74,7 @@ Then, add the Docker repository key to your local keychain. Add the Docker repository to your apt sources list, update and install the `lxc-docker` package. -*You may receive a warning that the package isn’t trusted. Answer yes to +*You may receive a warning that the package isn't trusted. Answer yes to continue installation.* sudo sh -c "echo deb https://get.docker.io/ubuntu docker main\ @@ -106,9 +106,9 @@ These instructions cover both Ubuntu Raring 13.04 and Saucy 13.10. **Optional AUFS filesystem support** -Ubuntu Raring already comes with the 3.8 kernel, so we don’t need to +Ubuntu Raring already comes with the 3.8 kernel, so we don't need to install it. However, not all systems have AUFS filesystem support -enabled. AUFS support is optional as of version 0.7, but it’s still +enabled. AUFS support is optional as of version 0.7, but it's still available as a driver and we recommend using it if you can. To make sure AUFS is installed, run the following commands: @@ -160,7 +160,7 @@ Unix group called *docker* and add users to it, then the socket read/writable by the *docker* group when the daemon starts. The `docker` daemon must always run as the root user, but if you run the `docker` client as a user in the -*docker* group then you don’t need to add `sudo` to +*docker* group then you don't need to add `sudo` to all the client commands. As of 0.9.0, you can specify that a group other than `docker` should own the Unix socket with the `-G` option. 
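To make the two points above concrete (a sketch, not a verbatim excerpt from the guide): creating the `docker` group, adding the current user to it, and, from 0.9.0 on, starting the daemon with `-G` so a differently named group owns the socket. The group name `mydockergroup` is just a placeholder.

    # create the docker group if it does not exist yet and join it
    sudo groupadd docker
    sudo gpasswd -a ${USER} docker
    # restart the daemon so the socket picks up the new group ownership
    sudo service docker restart

    # 0.9.0+: run the daemon with a different socket-owning group
    sudo docker -d -G mydockergroup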
@@ -245,7 +245,7 @@ Then reload UFW: sudo ufw reload -UFW’s default set of rules denies all incoming traffic. If you want to +UFW's default set of rules denies all incoming traffic. If you want to be able to reach your containers from another host then you should allow incoming connections on the Docker port (default 4243): @@ -263,7 +263,7 @@ warning: WARNING: Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : [8.8.8.8 8.8.4.4] -This warning is shown because the containers can’t use the local DNS +This warning is shown because the containers can't use the local DNS nameserver and Docker will default to using an external nameserver. This can be worked around by specifying a DNS server to be used by the @@ -281,7 +281,7 @@ The Docker daemon has to be restarted: sudo restart docker > **Warning**: -> If you’re doing this on a laptop which connects to various networks, +> If you're doing this on a laptop which connects to various networks, > make sure to choose a public DNS server. An alternative solution involves disabling dnsmasq in NetworkManager by @@ -310,10 +310,10 @@ you. ### Yandex [Yandex](http://yandex.ru/) in Russia is mirroring the Docker Debian -packages, updating every 6 hours. Substitute -`http://mirror.yandex.ru/mirrors/docker/` for -`http://get.docker.io/ubuntu` in the instructions -above. For example: +packages, updating every 6 hours. +Substitute `http://mirror.yandex.ru/mirrors/docker/` for +`http://get.docker.io/ubuntu` in the instructions above. +For example: sudo sh -c "echo deb http://mirror.yandex.ru/mirrors/docker/ docker main\ > /etc/apt/sources.list.d/docker.list" diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md index cadecdaddb..a5730862ad 100644 --- a/docs/sources/installation/windows.md +++ b/docs/sources/installation/windows.md @@ -6,57 +6,54 @@ page_keywords: Docker, Docker documentation, Windows, requirements, virtualbox, Docker can run on Windows using a virtualization platform like VirtualBox. A Linux distribution is run inside a virtual machine and -that’s where Docker will run. +that's where Docker will run. ## Installation > **Note**: -> Docker is still under heavy development! We don’t recommend using it in -> production yet, but we’re getting closer with each release. Please see +> Docker is still under heavy development! We don't recommend using it in +> production yet, but we're getting closer with each release. Please see > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) -1. Install virtualbox from - [https://www.virtualbox.org](https://www.virtualbox.org) - or follow - this - [tutorial](http://www.slideshare.net/julienbarbier42/install-virtualbox-on-windows-7). -2. Download the latest boot2docker.iso from - [https://github.com/boot2docker/boot2docker/releases](https://github.com/boot2docker/boot2docker/releases). -3. Start VirtualBox. -4. Create a new Virtual machine with the following settings: +1. Install virtualbox from [https://www.virtualbox.org]( + https://www.virtualbox.org) - or follow this [tutorial]( + http://www.slideshare.net/julienbarbier42/install-virtualbox-on-windows-7). +2. Download the latest boot2docker.iso from + [https://github.com/boot2docker/boot2docker/releases]( + https://github.com/boot2docker/boot2docker/releases). +3. Start VirtualBox. +4. 
Create a new Virtual machine with the following settings: -> - Name: boot2docker -> - Type: Linux -> - Version: Linux 2.6 (64 bit) -> - Memory size: 1024 MB -> - Hard drive: Do not add a virtual hard drive + - Name: boot2docker + - Type: Linux + - Version: Linux 2.6 (64 bit) + - Memory size: 1024 MB + - Hard drive: Do not add a virtual hard drive -5. Open the settings of the virtual machine: +5. Open the settings of the virtual machine: 5.1. go to Storage - 5.2. click the empty slot below Controller: IDE - 5.3. click the disc icon on the right of IDE Secondary Master - 5.4. click Choose a virtual CD/DVD disk file -6. Browse to the path where you’ve saved the boot2docker.iso, select - the boot2docker.iso and click open. +6. Browse to the path where you`ve saved the boot2docker.iso, select + the boot2docker.iso and click open. -7. Click OK on the Settings dialog to save the changes and close the - window. +7. Click OK on the Settings dialog to save the changes and close the + window. -8. Start the virtual machine by clicking the green start button. +8. Start the virtual machine by clicking the green start button. -9. The boot2docker virtual machine should boot now. +9. The boot2docker virtual machine should boot now. ## Running Docker boot2docker will log you in automatically so you can start using Docker right away. -Let’s try the “hello world” example. Run +Let's try the “hello world” example. Run docker run busybox echo hello world diff --git a/docs/sources/introduction/technology.md b/docs/sources/introduction/technology.md index 6ae7445595..346a118c39 100644 --- a/docs/sources/introduction/technology.md +++ b/docs/sources/introduction/technology.md @@ -80,7 +80,7 @@ servers. > **Note:** To learn more about the [*Docker Image Index*]( > http://index.docker.io) (public *and* private), check out the [Registry & -> Index Spec](http://docs.docker.io/en/latest/api/registry_index_spec/). +> Index Spec](http://docs.docker.io/api/registry_index_spec/). ### Summary @@ -246,7 +246,7 @@ results and only you and your users can pull them down and use them to build containers. You can [sign up for a plan here](https://index.docker.io/plans). To learn more, check out the [Working With Repositories]( -http://docs.docker.io/en/latest/use/workingwithrepository) section of our +http://docs.docker.io/use/workingwithrepository) section of our [User's Manual](http://docs.docker.io). ## Where to go from here diff --git a/docs/sources/introduction/working-with-docker.md b/docs/sources/introduction/working-with-docker.md index 637030acbc..17ed7ff761 100644 --- a/docs/sources/introduction/working-with-docker.md +++ b/docs/sources/introduction/working-with-docker.md @@ -10,7 +10,7 @@ page_keywords: docker, introduction, documentation, about, technology, understan > If you prefer a summary and would like to see how a specific command > works, check out the glossary of all available client > commands on our [User's Manual: Commands Reference]( -> http://docs.docker.io/en/latest/reference/commandline/cli). +> http://docs.docker.io/reference/commandline/cli). ## Introduction @@ -164,8 +164,8 @@ image is constructed. dockerfiles/django-uwsgi-nginx Dockerfile and configuration files to buil... 2 [OK] . . . -> **Note:** To learn more about trusted builds, check out [this] -(http://blog.docker.io/2013/11/introducing-trusted-builds) blog post. +> **Note:** To learn more about trusted builds, check out [this]( +http://blog.docker.io/2013/11/introducing-trusted-builds) blog post. 
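The hunk above shows only a fragment of search output (the `dockerfiles/django-uwsgi-nginx` row); listings like that come from `docker search`, which queries the public Index from the command line. A minimal sketch, with an arbitrary example search term:

    # list public images whose name or description mentions "django"
    sudo docker search django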
### Downloading an image @@ -279,7 +279,7 @@ The `Dockerfile` holds the set of instructions Docker uses to build a Docker ima > **Tip:** Below is a short summary of our full Dockerfile tutorial. In > order to get a better-grasp of how to work with these automation > scripts, check out the [Dockerfile step-by-step -> tutorial](http://www.docker.io/learn/dockerfile). +> tutorial](https://www.docker.io/learn/dockerfile). A `Dockerfile` contains instructions written in the following format: @@ -294,7 +294,7 @@ A `#` sign is used to provide a comment: > **Tip:** The `Dockerfile` is very flexible and provides a powerful set > of instructions for building applications. To learn more about the > `Dockerfile` and it's instructions see the [Dockerfile -> Reference](http://docs.docker.io/en/latest/reference/builder). +> Reference](http://docs.docker.io/reference/builder/). ### First steps with the Dockerfile diff --git a/docs/sources/reference.md b/docs/sources/reference.md index 3cd720c551..6c1ab462d4 100644 --- a/docs/sources/reference.md +++ b/docs/sources/reference.md @@ -2,8 +2,8 @@ ## Contents: -- [Commands](commandline/) -- [Dockerfile Reference](builder/) -- [Docker Run Reference](run/) -- [APIs](api/) + - [Commands](commandline/) + - [Dockerfile Reference](builder/) + - [Docker Run Reference](run/) + - [APIs](api/) diff --git a/docs/sources/reference/api.md b/docs/sources/reference/api.md index 7afa5250b3..254db25e92 100644 --- a/docs/sources/reference/api.md +++ b/docs/sources/reference/api.md @@ -1,100 +1,86 @@ # APIs -Your programs and scripts can access Docker’s functionality via these +Your programs and scripts can access Docker's functionality via these interfaces: -- [Registry & Index Spec](registry_index_spec/) - - [1. The 3 roles](registry_index_spec/#the-3-roles) - - [1.1 Index](registry_index_spec/#index) - - [1.2 Registry](registry_index_spec/#registry) - - [1.3 Docker](registry_index_spec/#docker) + - [Registry & Index Spec](registry_index_spec/) + - [1. The 3 roles](registry_index_spec/#the-3-roles) + - [1.1 Index](registry_index_spec/#index) + - [1.2 Registry](registry_index_spec/#registry) + - [1.3 Docker](registry_index_spec/#docker) - - [2. Workflow](registry_index_spec/#workflow) - - [2.1 Pull](registry_index_spec/#pull) - - [2.2 Push](registry_index_spec/#push) - - [2.3 Delete](registry_index_spec/#delete) + - [2. Workflow](registry_index_spec/#workflow) + - [2.1 Pull](registry_index_spec/#pull) + - [2.2 Push](registry_index_spec/#push) + - [2.3 Delete](registry_index_spec/#delete) - - [3. How to use the Registry in standalone - mode](registry_index_spec/#how-to-use-the-registry-in-standalone-mode) - - [3.1 Without an - Index](registry_index_spec/#without-an-index) - - [3.2 With an Index](registry_index_spec/#with-an-index) + - [3. How to use the Registry in standalone mode](registry_index_spec/#how-to-use-the-registry-in-standalone-mode) + - [3.1 Without an Index](registry_index_spec/#without-an-index) + - [3.2 With an Index](registry_index_spec/#with-an-index) - - [4. The API](registry_index_spec/#the-api) - - [4.1 Images](registry_index_spec/#images) - - [4.2 Users](registry_index_spec/#users) - - [4.3 Tags (Registry)](registry_index_spec/#tags-registry) - - [4.4 Images (Index)](registry_index_spec/#images-index) - - [4.5 Repositories](registry_index_spec/#repositories) + - [4. 
The API](registry_index_spec/#the-api) + - [4.1 Images](registry_index_spec/#images) + - [4.2 Users](registry_index_spec/#users) + - [4.3 Tags (Registry)](registry_index_spec/#tags-registry) + - [4.4 Images (Index)](registry_index_spec/#images-index) + - [4.5 Repositories](registry_index_spec/#repositories) - - [5. Chaining - Registries](registry_index_spec/#chaining-registries) - - [6. Authentication & - Authorization](registry_index_spec/#authentication-authorization) - - [6.1 On the Index](registry_index_spec/#on-the-index) - - [6.2 On the Registry](registry_index_spec/#on-the-registry) + - [5. Chaining Registries](registry_index_spec/#chaining-registries) + - [6. Authentication & Authorization](registry_index_spec/#authentication-authorization) + - [6.1 On the Index](registry_index_spec/#on-the-index) + - [6.2 On the Registry](registry_index_spec/#on-the-registry) - - [7 Document Version](registry_index_spec/#document-version) + - [7 Document Version](registry_index_spec/#document-version) -- [Docker Registry API](registry_api/) - - [1. Brief introduction](registry_api/#brief-introduction) - - [2. Endpoints](registry_api/#endpoints) - - [2.1 Images](registry_api/#images) - - [2.2 Tags](registry_api/#tags) - - [2.3 Repositories](registry_api/#repositories) - - [2.4 Status](registry_api/#status) + - [Docker Registry API](registry_api/) + - [1. Brief introduction](registry_api/#brief-introduction) + - [2. Endpoints](registry_api/#endpoints) + - [2.1 Images](registry_api/#images) + - [2.2 Tags](registry_api/#tags) + - [2.3 Repositories](registry_api/#repositories) + - [2.4 Status](registry_api/#status) - - [3 Authorization](registry_api/#authorization) + - [3 Authorization](registry_api/#authorization) -- [Docker Index API](index_api/) - - [1. Brief introduction](index_api/#brief-introduction) - - [2. Endpoints](index_api/#endpoints) - - [2.1 Repository](index_api/#repository) - - [2.2 Users](index_api/#users) - - [2.3 Search](index_api/#search) + - [Docker Index API](index_api/) + - [1. Brief introduction](index_api/#brief-introduction) + - [2. Endpoints](index_api/#endpoints) + - [2.1 Repository](index_api/#repository) + - [2.2 Users](index_api/#users) + - [2.3 Search](index_api/#search) -- [Docker Remote API](docker_remote_api/) - - [1. Brief introduction](docker_remote_api/#brief-introduction) - - [2. Versions](docker_remote_api/#versions) - - [v1.11](docker_remote_api/#v1-11) - - [v1.10](docker_remote_api/#v1-10) - - [v1.9](docker_remote_api/#v1-9) - - [v1.8](docker_remote_api/#v1-8) - - [v1.7](docker_remote_api/#v1-7) - - [v1.6](docker_remote_api/#v1-6) - - [v1.5](docker_remote_api/#v1-5) - - [v1.4](docker_remote_api/#v1-4) - - [v1.3](docker_remote_api/#v1-3) - - [v1.2](docker_remote_api/#v1-2) - - [v1.1](docker_remote_api/#v1-1) - - [v1.0](docker_remote_api/#v1-0) + - [Docker Remote API](docker_remote_api/) + - [1. Brief introduction](docker_remote_api/#brief-introduction) + - [2. Versions](docker_remote_api/#versions) + - [v1.11](docker_remote_api/#v1-11) + - [v1.10](docker_remote_api/#v1-10) + - [v1.9](docker_remote_api/#v1-9) + - [v1.8](docker_remote_api/#v1-8) + - [v1.7](docker_remote_api/#v1-7) + - [v1.6](docker_remote_api/#v1-6) + - [v1.5](docker_remote_api/#v1-5) + - [v1.4](docker_remote_api/#v1-4) + - [v1.3](docker_remote_api/#v1-3) + - [v1.2](docker_remote_api/#v1-2) + - [v1.1](docker_remote_api/#v1-1) + - [v1.0](docker_remote_api/#v1-0) -- [Docker Remote API Client Libraries](remote_api_client_libraries/) -- [docker.io OAuth API](docker_io_oauth_api/) - - [1. 
Brief introduction](docker_io_oauth_api/#brief-introduction) - - [2. Register Your - Application](docker_io_oauth_api/#register-your-application) - - [3. Endpoints](docker_io_oauth_api/#endpoints) - - [3.1 Get an Authorization - Code](docker_io_oauth_api/#get-an-authorization-code) - - [3.2 Get an Access - Token](docker_io_oauth_api/#get-an-access-token) - - [3.3 Refresh a Token](docker_io_oauth_api/#refresh-a-token) + - [Docker Remote API Client Libraries](remote_api_client_libraries/) + - [docker.io OAuth API](docker_io_oauth_api/) + - [1. Brief introduction](docker_io_oauth_api/#brief-introduction) + - [2. Register Your Application](docker_io_oauth_api/#register-your-application) + - [3. Endpoints](docker_io_oauth_api/#endpoints) + - [3.1 Get an Authorization Code](docker_io_oauth_api/#get-an-authorization-code) + - [3.2 Get an Access Token](docker_io_oauth_api/#get-an-access-token) + - [3.3 Refresh a Token](docker_io_oauth_api/#refresh-a-token) - - [4. Use an Access Token with the - API](docker_io_oauth_api/#use-an-access-token-with-the-api) + - [4. Use an Access Token with the API](docker_io_oauth_api/#use-an-access-token-with-the-api) -- [docker.io Accounts API](docker_io_accounts_api/) - - [1. Endpoints](docker_io_accounts_api/#endpoints) - - [1.1 Get a single - user](docker_io_accounts_api/#get-a-single-user) - - [1.2 Update a single - user](docker_io_accounts_api/#update-a-single-user) - - [1.3 List email addresses for a - user](docker_io_accounts_api/#list-email-addresses-for-a-user) - - [1.4 Add email address for a - user](docker_io_accounts_api/#add-email-address-for-a-user) - - [1.5 Update an email address for a - user](docker_io_accounts_api/#update-an-email-address-for-a-user) - - [1.6 Delete email address for a - user](docker_io_accounts_api/#delete-email-address-for-a-user) \ No newline at end of file + - [docker.io Accounts API](docker_io_accounts_api/) + - [1. Endpoints](docker_io_accounts_api/#endpoints) + - [1.1 Get a single user](docker_io_accounts_api/#get-a-single-user) + - [1.2 Update a single user](docker_io_accounts_api/#update-a-single-user) + - [1.3 List email addresses for a user](docker_io_accounts_api/#list-email-addresses-for-a-user) + - [1.4 Add email address for a user](docker_io_accounts_api/#add-email-address-for-a-user) + - [1.5 Update an email address for a user](docker_io_accounts_api/#update-an-email-address-for-a-user) + - [1.6 Delete email address for a user](docker_io_accounts_api/#delete-email-address-for-a-user) \ No newline at end of file diff --git a/docs/sources/reference/api/README.md b/docs/sources/reference/api/README.md index ec42b89733..a7b8ae1b44 100644 --- a/docs/sources/reference/api/README.md +++ b/docs/sources/reference/api/README.md @@ -1,6 +1,9 @@ This directory holds the authoritative specifications of APIs defined and implemented by Docker. 
Currently this includes: -* The remote API by which a docker node can be queried over HTTP -* The registry API by which a docker node can download and upload container images for storage and sharing -* The index search API by which a docker node can search the public index for images to download -* The docker.io OAuth and accounts API which 3rd party services can use to access account information + * The remote API by which a docker node can be queried over HTTP + * The registry API by which a docker node can download and upload + container images for storage and sharing + * The index search API by which a docker node can search the public + index for images to download + * The docker.io OAuth and accounts API which 3rd party services can + use to access account information diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.0.md b/docs/sources/reference/api/archive/docker_remote_api_v1.0.md index 8f94733584..dffee87dca 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.0.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.0.md @@ -2,9 +2,9 @@ page_title: Remote API v1.0 page_description: API Documentation for Docker page_keywords: API, Docker, rcli, REST, documentation -# [Docker Remote API v1.0](#id1) +# Docker Remote API v1.0 -## [1. Brief introduction](#id2) +# 1. Brief introduction - The Remote API is replacing rcli - Default port in the docker daemon is 4243 @@ -12,14 +12,15 @@ page_keywords: API, Docker, rcli, REST, documentation or pull, the HTTP connection is hijacked to transport stdout stdin and stderr -## [2. Endpoints](#id3) +# 2. Endpoints -### [2.1 Containers](#id4) +## 2.1 Containers -#### [List containers](#id5) +### List containers - `GET /containers/json` -: List containers +`GET /containers/json` + +List containers **Example request**: @@ -80,10 +81,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create a container](#id6) +### Create a container - `POST /containers/create` -: Create a container +`POST /containers/create` + +Create a container **Example request**: @@ -126,7 +128,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **config** – the container’s configuration + - **config** – the container's configuration Status Codes: @@ -135,10 +137,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **406** – impossible to attach (container not running) - **500** – server error -#### [Inspect a container](#id7) +### Inspect a container - `GET /containers/`(*id*)`/json` -: Return low-level information on the container `id` +`GET /containers/(id)/json` + +Return low-level information on the container `id` **Example request**: @@ -202,10 +205,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Inspect changes on a container’s filesystem](#id8) +### Inspect changes on a container's filesystem - `GET /containers/`(*id*)`/changes` -: Inspect changes on container `id` ‘s filesystem +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem **Example request**: @@ -237,10 +241,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Export a container](#id9) +### Export a container - `GET /containers/`(*id*)`/export` -: Export the contents of container `id` +`GET /containers/(id)/export` + +Export the contents of container `id` **Example request**: @@ -259,10 +264,11 @@ page_keywords: 
API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Start a container](#id10) +### Start a container - `POST /containers/`(*id*)`/start` -: Start the container `id` +`POST /containers/(id)/start` + +Start the container `id` **Example request**: @@ -278,10 +284,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Stop a container](#id11) +### Stop a container - `POST /containers/`(*id*)`/stop` -: Stop the container `id` +`POST /containers/(id)/stop` + +Stop the container `id` **Example request**: @@ -303,10 +310,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Restart a container](#id12) +### Restart a container - `POST /containers/`(*id*)`/restart` -: Restart the container `id` +`POST /containers/(id)/restart` + +Restart the container `id` **Example request**: @@ -328,10 +336,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Kill a container](#id13) +### Kill a container - `POST /containers/`(*id*)`/kill` -: Kill the container `id` +`POST /containers/(id)/kill` + +Kill the container `id` **Example request**: @@ -347,10 +356,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Attach to a container](#id14) +### Attach to a container - `POST /containers/`(*id*)`/attach` -: Attach to the container `id` +`POST /containers/(id)/attach` + +Attach to the container `id` **Example request**: @@ -385,11 +395,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Wait a container](#id15) +### Wait a container - `POST /containers/`(*id*)`/wait` -: Block until container `id` stops, then returns - the exit code +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code **Example request**: @@ -408,10 +418,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Remove a container](#id16) +### Remove a container - `DELETE /containers/`(*id*) -: Remove the container `id` from the filesystem +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem **Example request**: @@ -435,13 +446,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -### [2.2 Images](#id17) +## 2.2 Images -#### [List Images](#id18) +### List Images - `GET /images/`(*format*) -: List images `format` could be json or viz (json - default) +`GET /images/(format)` + +List images `format` could be json or viz (json default) **Example request**: @@ -507,11 +518,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create an image](#id19) +### Create an image - `POST /images/create` -: Create an image, either by pull it from the registry or by importing - it +`POST /images/create` + +Create an image, either by pull it from the registry or by importing it **Example request**: @@ -539,11 +550,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Insert a file in an image](#id20) +### Insert a file in an image - `POST /images/`(*name*)`/insert` -: Insert a file from `url` in the image - `name` at `path` +`POST /images/(name)/insert` + +Insert a file from 
`url` in the image `name` at `path` **Example request**: @@ -560,10 +571,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Inspect an image](#id21) +### Inspect an image - `GET /images/`(*name*)`/json` -: Return low-level information on the image `name` +`GET /images/(name)/json` + +Return low-level information on the image `name` **Example request**: @@ -607,10 +619,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Get the history of an image](#id22) +### Get the history of an image - `GET /images/`(*name*)`/history` -: Return the history of the image `name` +`GET /images/(name)/history` + +Return the history of the image `name` **Example request**: @@ -640,10 +653,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Push an image on the registry](#id23) +### Push an image on the registry - `POST /images/`(*name*)`/push` -: Push the image `name` on the registry +`POST /images/(name)/push` + +Push the image `name` on the registry > **Example request**: > @@ -668,10 +682,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Tag an image into a repository](#id24) +### Tag an image into a repository - `POST /images/`(*name*)`/tag` -: Tag the image `name` into a repository +`POST /images/(name)/tag` + +Tag the image `name` into a repository **Example request**: @@ -695,10 +710,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Remove an image](#id25) +### Remove an image - `DELETE /images/`(*name*) -: Remove the image `name` from the filesystem +`DELETE /images/(name)` + +Remove the image `name` from the filesystem **Example request**: @@ -714,10 +730,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Search images](#id26) +### Search images - `GET /images/search` -: Search for an image in the docker index +`GET /images/search` + +Search for an image in the docker index **Example request**: @@ -747,12 +764,13 @@ page_keywords: API, Docker, rcli, REST, documentation :statuscode 200: no error :statuscode 500: server error -### [2.3 Misc](#id27) +## 2.3 Misc -#### [Build an image from Dockerfile via stdin](#id28) +### Build an image from Dockerfile via stdin - `POST /build` -: Build an image from Dockerfile via stdin +`POST /build` + +Build an image from Dockerfile via stdin **Example request**: @@ -778,10 +796,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Get default username and email](#id29) +### Get default username and email - `GET /auth` -: Get the default username and email +`GET /auth` + +Get the default username and email **Example request**: @@ -802,10 +821,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Check auth configuration and store it](#id30) +### Check auth configuration and store it - `POST /auth` -: Get the default username and email +`POST /auth` + +Get the default username and email **Example request**: @@ -828,10 +848,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **204** – no error - **500** – server error -#### [Display system-wide information](#id31) +### Display system-wide information - `GET /info` -: Display system-wide information +`GET 
/info` + +Display system-wide information **Example request**: @@ -857,10 +878,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Show the docker version information](#id32) +### Show the docker version information - `GET /version` -: Show the docker version information +`GET /version` + +Show the docker version information **Example request**: @@ -882,10 +904,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Create a new image from a container’s changes](#id33) +### Create a new image from a container's changes - `POST /commit` -: Create a new image from a container’s changes +`POST /commit` + +Create a new image from a container's changes > > **Example request**: @@ -913,7 +936,7 @@ page_keywords: API, Docker, rcli, REST, documentation - **tag** – tag - **m** – commit message - **author** – author (eg. "John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>") + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") Status Codes: @@ -921,28 +944,28 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -## [3. Going further](#id34) +# 3. Going further -### [3.1 Inside ‘docker run’](#id35) +## 3.1 Inside `docker run` -Here are the steps of ‘docker run’ : +Here are the steps of `docker run` : -- Create the container + - Create the container -- If the status code is 404, it means the image doesn’t exists: - : - Try to pull it - - Then retry to create the container + - If the status code is 404, it means the image doesn't exists: + - Try to pull it + - Then retry to create the container -- Start the container + - Start the container -- If you are not in detached mode: - : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 + - If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 -- If in detached mode or only stdin is attached: - : - Display the container’s id + - If in detached mode or only stdin is attached: + - Display the container's -### [3.2 Hijacking](#id36) +## 3.2 Hijacking In this first version of the API, some of the endpoints, like /attach, /pull or /push uses hijacking to transport stdin, stdout and stderr on diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.1.md b/docs/sources/reference/api/archive/docker_remote_api_v1.1.md index 71d2f91d37..32220e79cf 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.1.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.1.md @@ -2,9 +2,9 @@ page_title: Remote API v1.1 page_description: API Documentation for Docker page_keywords: API, Docker, rcli, REST, documentation -# [Docker Remote API v1.1](#id1) +# Docker Remote API v1.1 -## [1. Brief introduction](#id2) +# 1. Brief introduction - The Remote API is replacing rcli - Default port in the docker daemon is 4243 @@ -12,14 +12,15 @@ page_keywords: API, Docker, rcli, REST, documentation or pull, the HTTP connection is hijacked to transport stdout stdin and stderr -## [2. Endpoints](#id3) +# 2. 
Endpoints -### [2.1 Containers](#id4) +## 2.1 Containers -#### [List containers](#id5) +### List containers - `GET /containers/json` -: List containers +`GET /containers/json` + +List containers **Example request**: @@ -80,10 +81,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create a container](#id6) +### Create a container - `POST /containers/create` -: Create a container +`POST /containers/create` + +Create a container **Example request**: @@ -126,7 +128,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **config** – the container’s configuration + - **config** – the container's configuration Status Codes: @@ -135,10 +137,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **406** – impossible to attach (container not running) - **500** – server error -#### [Inspect a container](#id7) +### Inspect a container - `GET /containers/`(*id*)`/json` -: Return low-level information on the container `id` +`GET /containers/(id)/json` + +Return low-level information on the container `id` **Example request**: @@ -202,10 +205,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Inspect changes on a container’s filesystem](#id8) +### Inspect changes on a container's filesystem - `GET /containers/`(*id*)`/changes` -: Inspect changes on container `id` ‘s filesystem +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem **Example request**: @@ -237,10 +241,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Export a container](#id9) +### Export a container - `GET /containers/`(*id*)`/export` -: Export the contents of container `id` +`GET /containers/(id)/export` + +Export the contents of container `id` **Example request**: @@ -259,10 +264,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Start a container](#id10) +### Start a container - `POST /containers/`(*id*)`/start` -: Start the container `id` +`POST /containers/(id)/start` + +Start the container `id` **Example request**: @@ -278,10 +284,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Stop a container](#id11) +### Stop a container - `POST /containers/`(*id*)`/stop` -: Stop the container `id` +`POST /containers/(id)/stop` + +Stop the container `id` **Example request**: @@ -303,10 +310,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Restart a container](#id12) +### Restart a container - `POST /containers/`(*id*)`/restart` -: Restart the container `id` +`POST /containers/(id)/restart` + +Restart the container `id` **Example request**: @@ -328,10 +336,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Kill a container](#id13) +### Kill a container - `POST /containers/`(*id*)`/kill` -: Kill the container `id` +`POST /containers/(id)/kill` + +Kill the container `id` **Example request**: @@ -347,10 +356,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Attach to a container](#id14) +### Attach to a container - `POST /containers/`(*id*)`/attach` -: Attach to the container `id` +`POST /containers/(id)/attach` + +Attach to the container `id` 
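As the introduction notes, attach is one of the endpoints where the HTTP connection is hijacked to transport stdout, stdin and stderr. A minimal, illustrative Go sketch of following a container's output over this endpoint, assuming the daemon's default port 4243 and the `logs=1` and `stream=1` parameters described in section 3.1 (the `stdout`/`stderr` selection flags are an additional assumption, and interactive stdin is left out because it needs the raw hijacked socket):

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "os"
    )

    func main() {
        if len(os.Args) < 2 {
            fmt.Fprintln(os.Stderr, "usage: attach <container-id>")
            os.Exit(1)
        }
        id := os.Args[1]

        // Default daemon port 4243; logs=1 replays output from the
        // container's start and stream=1 keeps the connection open
        // (see section 3.1). The stdout/stderr flags are assumptions.
        url := fmt.Sprintf(
            "http://127.0.0.1:4243/containers/%s/attach?logs=1&stream=1&stdout=1&stderr=1", id)

        resp, err := http.Post(url, "text/plain", nil)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        defer resp.Body.Close()

        // The connection stays open while the container runs; copy the
        // streamed output straight to our own stdout.
        if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    }

The endpoint's own example request and response follow.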
**Example request**: @@ -385,11 +395,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Wait a container](#id15) +### Wait a container - `POST /containers/`(*id*)`/wait` -: Block until container `id` stops, then returns - the exit code +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code **Example request**: @@ -408,10 +418,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Remove a container](#id16) +### Remove a container - `DELETE /containers/`(*id*) -: Remove the container `id` from the filesystem +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem **Example request**: @@ -435,13 +446,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -### [2.2 Images](#id17) +## 2.2 Images -#### [List Images](#id18) +### List Images - `GET /images/`(*format*) -: List images `format` could be json or viz (json - default) +`GET /images/(format)` + +List images `format` could be json or viz (json default) **Example request**: @@ -507,11 +518,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create an image](#id19) +### Create an image - `POST /images/create` -: Create an image, either by pull it from the registry or by importing - it +`POST /images/create` + +Create an image, either by pull it from the registry or by importing it **Example request**: @@ -542,11 +553,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Insert a file in an image](#id20) +### Insert a file in an image - `POST /images/`(*name*)`/insert` -: Insert a file from `url` in the image - `name` at `path` +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` **Example request**: @@ -567,10 +578,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Inspect an image](#id21) +### Inspect an image - `GET /images/`(*name*)`/json` -: Return low-level information on the image `name` +`GET /images/(name)/json` + +Return low-level information on the image `name` **Example request**: @@ -614,10 +626,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Get the history of an image](#id22) +### Get the history of an image - `GET /images/`(*name*)`/history` -: Return the history of the image `name` +`GET /images/(name)/history` + +Return the history of the image `name` **Example request**: @@ -647,10 +660,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Push an image on the registry](#id23) +### Push an image on the registry - `POST /images/`(*name*)`/push` -: Push the image `name` on the registry +`POST /images/(name)/push` + +Push the image `name` on the registry > **Example request**: > @@ -678,10 +692,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Tag an image into a repository](#id24) +### Tag an image into a repository - `POST /images/`(*name*)`/tag` -: Tag the image `name` into a repository +`POST /images/(name)/tag` + +Tag the image `name` into a repository **Example request**: @@ -706,10 +721,11 @@ page_keywords: API, Docker, rcli, REST, documentation - 
**409** – conflict - **500** – server error -#### [Remove an image](#id25) +### Remove an image - `DELETE /images/`(*name*) -: Remove the image `name` from the filesystem +`DELETE /images/(name)` + +Remove the image `name` from the filesystem **Example request**: @@ -725,10 +741,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Search images](#id26) +### Search images - `GET /images/search` -: Search for an image in the docker index +`GET /images/search` + +Search for an image in the docker index **Example request**: @@ -758,12 +775,13 @@ page_keywords: API, Docker, rcli, REST, documentation :statuscode 200: no error :statuscode 500: server error -### [2.3 Misc](#id27) +## 2.3 Misc -#### [Build an image from Dockerfile via stdin](#id28) +### Build an image from Dockerfile via stdin - `POST /build` -: Build an image from Dockerfile via stdin +`POST /build` + +Build an image from Dockerfile via stdin **Example request**: @@ -789,10 +807,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Get default username and email](#id29) +### Get default username and email - `GET /auth` -: Get the default username and email +`GET /auth` + +Get the default username and email **Example request**: @@ -813,10 +832,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Check auth configuration and store it](#id30) +### Check auth configuration and store it - `POST /auth` -: Get the default username and email +`POST /auth` + +Get the default username and email **Example request**: @@ -839,10 +859,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **204** – no error - **500** – server error -#### [Display system-wide information](#id31) +### Display system-wide information - `GET /info` -: Display system-wide information +`GET /info` + +Display system-wide information **Example request**: @@ -868,10 +889,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Show the docker version information](#id32) +### Show the docker version information - `GET /version` -: Show the docker version information +`GET /version` + +Show the docker version information **Example request**: @@ -893,10 +915,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Create a new image from a container’s changes](#id33) +### Create a new image from a container's changes - `POST /commit` -: Create a new image from a container’s changes +`POST /commit` + +Create a new image from a container's changes **Example request**: @@ -924,7 +947,7 @@ page_keywords: API, Docker, rcli, REST, documentation - **tag** – tag - **m** – commit message - **author** – author (eg. "John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>") + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") Status Codes: @@ -932,28 +955,28 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -## [3. Going further](#id34) +# 3. 
Going further -### [3.1 Inside ‘docker run’](#id35) +## 3.1 Inside `docker run` -Here are the steps of ‘docker run’ : +Here are the steps of `docker run` : -- Create the container + - Create the container -- If the status code is 404, it means the image doesn’t exists: - : - Try to pull it - - Then retry to create the container + - If the status code is 404, it means the image doesn't exists: + - Try to pull it + - Then retry to create the container -- Start the container + - Start the container -- If you are not in detached mode: - : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 + - If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 -- If in detached mode or only stdin is attached: - : - Display the container’s id + - If in detached mode or only stdin is attached: + - Display the container's -### [3.2 Hijacking](#id36) +## 3.2 Hijacking In this version of the API, /attach uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.2.md b/docs/sources/reference/api/archive/docker_remote_api_v1.2.md index 0239de6681..19703a0028 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.2.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.2.md @@ -2,24 +2,25 @@ page_title: Remote API v1.2 page_description: API Documentation for Docker page_keywords: API, Docker, rcli, REST, documentation -# [Docker Remote API v1.2](#id1) +# Docker Remote API v1.2 -## [1. Brief introduction](#id2) +# 1. Brief introduction -- The Remote API is replacing rcli -- Default port in the docker daemon is 4243 -- The API tends to be REST, but for some complex commands, like attach - or pull, the HTTP connection is hijacked to transport stdout stdin - and stderr +- The Remote API is replacing rcli +- Default port in the docker daemon is 4243 +- The API tends to be REST, but for some complex commands, like attach + or pull, the HTTP connection is hijacked to transport stdout stdin + and stderr -## [2. Endpoints](#id3) +# 2. 
Endpoints -### [2.1 Containers](#id4) +## 2.1 Containers -#### [List containers](#id5) +### List containers - `GET /containers/json` -: List containers +`GET /containers/json` + +List containers **Example request**: @@ -92,10 +93,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create a container](#id6) +### Create a container - `POST /containers/create` -: Create a container +`POST /containers/create` + +Create a container **Example request**: @@ -138,7 +140,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **config** – the container’s configuration + - **config** – the container's configuration Status Codes: @@ -147,10 +149,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **406** – impossible to attach (container not running) - **500** – server error -#### [Inspect a container](#id7) +### Inspect a container - `GET /containers/`(*id*)`/json` -: Return low-level information on the container `id` +`GET /containers/(id)/json` + +Return low-level information on the container `id` **Example request**: @@ -214,10 +217,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Inspect changes on a container’s filesystem](#id8) +### Inspect changes on a container's filesystem - `GET /containers/`(*id*)`/changes` -: Inspect changes on container `id` ‘s filesystem +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem **Example request**: @@ -249,10 +253,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Export a container](#id9) +### Export a container - `GET /containers/`(*id*)`/export` -: Export the contents of container `id` +`GET /containers/(id)/export` + +Export the contents of container `id` **Example request**: @@ -271,10 +276,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Start a container](#id10) +### Start a container - `POST /containers/`(*id*)`/start` -: Start the container `id` +`POST /containers/(id)/start` + +Start the container `id` **Example request**: @@ -290,10 +296,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Stop a container](#id11) +### Stop a container - `POST /containers/`(*id*)`/stop` -: Stop the container `id` +`POST /containers/(id)/stop` + +Stop the container `id` **Example request**: @@ -315,10 +322,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Restart a container](#id12) +### Restart a container - `POST /containers/`(*id*)`/restart` -: Restart the container `id` +`POST /containers/(id)/restart` + +Restart the container `id` **Example request**: @@ -340,10 +348,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Kill a container](#id13) +### Kill a container - `POST /containers/`(*id*)`/kill` -: Kill the container `id` +`POST /containers/(id)/kill` + +Kill the container `id` **Example request**: @@ -359,10 +368,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Attach to a container](#id14) +### Attach to a container - `POST /containers/`(*id*)`/attach` -: Attach to the container `id` +`POST /containers/(id)/attach` + +Attach to the container `id` 
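Attach streams output until the container stops; to pick up the exit code afterwards, a client can call the wait endpoint documented a little further down (`POST /containers/(id)/wait`, which blocks until container `id` stops and then returns the exit code). A minimal Go sketch of that call, again assuming the default port 4243 and assuming the exit code comes back in a JSON `StatusCode` field:

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
        "os"
    )

    // waitResult models the JSON reply from POST /containers/(id)/wait.
    // The StatusCode field name is an assumption; the spec only says the
    // call returns the exit code.
    type waitResult struct {
        StatusCode int `json:"StatusCode"`
    }

    func main() {
        if len(os.Args) < 2 {
            fmt.Fprintln(os.Stderr, "usage: wait <container-id>")
            os.Exit(1)
        }
        id := os.Args[1]

        // This request blocks until the container exits.
        url := fmt.Sprintf("http://127.0.0.1:4243/containers/%s/wait", id)
        resp, err := http.Post(url, "application/json", nil)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        defer resp.Body.Close()

        if resp.StatusCode != http.StatusOK {
            fmt.Fprintf(os.Stderr, "unexpected HTTP status: %s\n", resp.Status)
            os.Exit(1)
        }

        var result waitResult
        if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Printf("container %s exited with code %d\n", id, result.StatusCode)
    }

The attach endpoint's own example request and response follow.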
**Example request**: @@ -397,11 +407,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Wait a container](#id15) +### Wait a container - `POST /containers/`(*id*)`/wait` -: Block until container `id` stops, then returns - the exit code +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code **Example request**: @@ -420,10 +430,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Remove a container](#id16) +### Remove a container - `DELETE /containers/`(*id*) -: Remove the container `id` from the filesystem +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem **Example request**: @@ -447,13 +458,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -### [2.2 Images](#id17) +## 2.2 Images -#### [List Images](#id18) +### List Images - `GET /images/`(*format*) -: List images `format` could be json or viz (json - default) +`GET /images/(format)` + +List images `format` could be json or viz (json default) **Example request**: @@ -523,11 +534,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create an image](#id19) +### Create an image - `POST /images/create` -: Create an image, either by pull it from the registry or by importing - it +`POST /images/create` + +Create an image, either by pull it from the registry or by importing it **Example request**: @@ -558,11 +569,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Insert a file in an image](#id20) +### Insert a file in an image - `POST /images/`(*name*)`/insert` -: Insert a file from `url` in the image - `name` at `path` +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` **Example request**: @@ -583,10 +594,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Inspect an image](#id21) +### Inspect an image - `GET /images/`(*name*)`/json` -: Return low-level information on the image `name` +`GET /images/(name)/json` + +Return low-level information on the image `name` **Example request**: @@ -631,10 +643,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Get the history of an image](#id22) +### Get the history of an image - `GET /images/`(*name*)`/history` -: Return the history of the image `name` +`GET /images/(name)/history` + +Return the history of the image `name` **Example request**: @@ -665,10 +678,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Push an image on the registry](#id23) +### Push an image on the registry - `POST /images/`(*name*)`/push` -: Push the image `name` on the registry +`POST /images/(name)/push` + +Push the image `name` on the registry > **Example request**: > @@ -697,10 +711,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Tag an image into a repository](#id24) +### Tag an image into a repository - `POST /images/`(*name*)`/tag` -: Tag the image `name` into a repository +`POST /images/(name)/tag` + +Tag the image `name` into a repository **Example request**: @@ -725,10 +740,11 @@ page_keywords: API, Docker, rcli, REST, documentation - 
**409** – conflict - **500** – server error -#### [Remove an image](#id25) +### Remove an image - `DELETE /images/`(*name*) -: Remove the image `name` from the filesystem +`DELETE /images/(name)` + +Remove the image `name` from the filesystem **Example request**: @@ -752,10 +768,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### [Search images](#id26) +### Search images - `GET /images/search` -: Search for an image in the docker index +`GET /images/search` + +Search for an image in the docker index **Example request**: @@ -785,12 +802,13 @@ page_keywords: API, Docker, rcli, REST, documentation :statuscode 200: no error :statuscode 500: server error -### [2.3 Misc](#id27) +## 2.3 Misc -#### [Build an image from Dockerfile via stdin](#id28) +### Build an image from Dockerfile via stdin - `POST /build` -: Build an image from Dockerfile +`POST /build` + +Build an image from Dockerfile **Example request**: @@ -820,10 +838,11 @@ page_keywords: API, Docker, rcli, REST, documentation {{ STREAM }} is the raw text output of the build command. It uses the HTTP Hijack method in order to stream. -#### [Check auth configuration](#id29) +### Check auth configuration - `POST /auth` -: Get the default username and email +`POST /auth` + +Get the default username and email **Example request**: @@ -853,10 +872,11 @@ HTTP Hijack method in order to stream. - **403** – forbidden - **500** – server error -#### [Display system-wide information](#id30) +### Display system-wide information - `GET /info` -: Display system-wide information +`GET /info` + +Display system-wide information **Example request**: @@ -882,10 +902,11 @@ HTTP Hijack method in order to stream. - **200** – no error - **500** – server error -#### [Show the docker version information](#id31) +### Show the docker version information - `GET /version` -: Show the docker version information +`GET /version` + +Show the docker version information **Example request**: @@ -907,10 +928,11 @@ HTTP Hijack method in order to stream. - **200** – no error - **500** – server error -#### [Create a new image from a container’s changes](#id32) +### Create a new image from a container's changes - `POST /commit` -: Create a new image from a container’s changes +`POST /commit` + +Create a new image from a container's changes **Example request**: @@ -938,7 +960,7 @@ HTTP Hijack method in order to stream. - **tag** – tag - **m** – commit message - **author** – author (eg. "John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>") + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") Status Codes: @@ -946,33 +968,33 @@ HTTP Hijack method in order to stream. - **404** – no such container - **500** – server error -## [3. Going further](#id33) +# 3. 
Going further -### [3.1 Inside ‘docker run’](#id34) +## 3.1 Inside `docker run` -Here are the steps of ‘docker run’ : +Here are the steps of `docker run` : -- Create the container + - Create the container -- If the status code is 404, it means the image doesn’t exists: - : - Try to pull it - - Then retry to create the container + - If the status code is 404, it means the image doesn't exists: + - Try to pull it + - Then retry to create the container -- Start the container + - Start the container -- If you are not in detached mode: - : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 + - If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 -- If in detached mode or only stdin is attached: - : - Display the container’s id + - If in detached mode or only stdin is attached: + - Display the container's -### [3.2 Hijacking](#id35) +## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. -### [3.3 CORS Requests](#id36) +## 3.3 CORS Requests To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.3.md b/docs/sources/reference/api/archive/docker_remote_api_v1.3.md index d83b9d85b1..510719ee00 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.3.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.3.md @@ -2,24 +2,25 @@ page_title: Remote API v1.3 page_description: API Documentation for Docker page_keywords: API, Docker, rcli, REST, documentation -# [Docker Remote API v1.3](#id1) +# Docker Remote API v1.3 -## [1. Brief introduction](#id2) +# 1. Brief introduction -- The Remote API is replacing rcli -- Default port in the docker daemon is 4243 -- The API tends to be REST, but for some complex commands, like attach - or pull, the HTTP connection is hijacked to transport stdout stdin - and stderr +- The Remote API is replacing rcli +- Default port in the docker daemon is 4243 +- The API tends to be REST, but for some complex commands, like attach + or pull, the HTTP connection is hijacked to transport stdout stdin + and stderr -## [2. Endpoints](#id3) +# 2. 
Endpoints -### [2.1 Containers](#id4) +## 2.1 Containers -#### [List containers](#id5) +### List containers - `GET /containers/json` -: List containers +`GET /containers/json` + +List containers **Example request**: @@ -94,10 +95,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create a container](#id6) +### Create a container - `POST /containers/create` -: Create a container +`POST /containers/create` + +Create a container **Example request**: @@ -140,7 +142,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **config** – the container’s configuration + - **config** – the container's configuration Status Codes: @@ -149,10 +151,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **406** – impossible to attach (container not running) - **500** – server error -#### [Inspect a container](#id7) +### Inspect a container - `GET /containers/`(*id*)`/json` -: Return low-level information on the container `id` +`GET /containers/(id)/json` + +Return low-level information on the container `id` **Example request**: @@ -216,10 +219,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [List processes running inside a container](#id8) +### List processes running inside a container - `GET /containers/`(*id*)`/top` -: List processes running inside the container `id` +`GET /containers/(id)/top` + +List processes running inside the container `id` **Example request**: @@ -251,10 +255,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Inspect changes on a container’s filesystem](#id9) +### Inspect changes on a container's filesystem - `GET /containers/`(*id*)`/changes` -: Inspect changes on container `id` ‘s filesystem +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem **Example request**: @@ -286,10 +291,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Export a container](#id10) +### Export a container - `GET /containers/`(*id*)`/export` -: Export the contents of container `id` +`GET /containers/(id)/export` + +Export the contents of container `id` **Example request**: @@ -308,10 +314,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Start a container](#id11) +### Start a container - `POST /containers/`(*id*)`/start` -: Start the container `id` +`POST /containers/(id)/start` + +Start the container `id` **Example request**: @@ -331,7 +338,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **hostConfig** – the container’s host configuration (optional) + - **hostConfig** – the container's host configuration (optional) Status Codes: @@ -339,10 +346,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Stop a container](#id12) +### Stop a container - `POST /containers/`(*id*)`/stop` -: Stop the container `id` +`POST /containers/(id)/stop` + +Stop the container `id` **Example request**: @@ -364,10 +372,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Restart a container](#id13) +### Restart a container - `POST /containers/`(*id*)`/restart` -: Restart the container `id` +`POST /containers/(id)/restart` + +Restart the container `id` **Example request**: @@ -389,10 
+398,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Kill a container](#id14) +### Kill a container - `POST /containers/`(*id*)`/kill` -: Kill the container `id` +`POST /containers/(id)/kill` + +Kill the container `id` **Example request**: @@ -408,10 +418,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Attach to a container](#id15) +### Attach to a container - `POST /containers/`(*id*)`/attach` -: Attach to the container `id` +`POST /containers/(id)/attach` + +Attach to the container `id` **Example request**: @@ -446,11 +457,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Wait a container](#id16) +### Wait a container - `POST /containers/`(*id*)`/wait` -: Block until container `id` stops, then returns - the exit code +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code **Example request**: @@ -469,10 +480,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Remove a container](#id17) +### Remove a container - `DELETE /containers/`(*id*) -: Remove the container `id` from the filesystem +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem **Example request**: @@ -496,13 +508,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -### [2.2 Images](#id18) +## 2.2 Images -#### [List Images](#id19) +### List Images - `GET /images/`(*format*) -: List images `format` could be json or viz (json - default) +`GET /images/(format)` + +List images `format` could be json or viz (json default) **Example request**: @@ -572,11 +584,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create an image](#id20) +### Create an image - `POST /images/create` -: Create an image, either by pull it from the registry or by importing - it +`POST /images/create` + +Create an image, either by pull it from the registry or by importing it **Example request**: @@ -607,11 +619,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Insert a file in an image](#id21) +### Insert a file in an image - `POST /images/`(*name*)`/insert` -: Insert a file from `url` in the image - `name` at `path` +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` **Example request**: @@ -632,10 +644,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Inspect an image](#id22) +### Inspect an image - `GET /images/`(*name*)`/json` -: Return low-level information on the image `name` +`GET /images/(name)/json` + +Return low-level information on the image `name` **Example request**: @@ -680,10 +693,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Get the history of an image](#id23) +### Get the history of an image - `GET /images/`(*name*)`/history` -: Return the history of the image `name` +`GET /images/(name)/history` + +Return the history of the image `name` **Example request**: @@ -713,10 +727,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Push an image on the registry](#id24) +### Push an 
image on the registry - `POST /images/`(*name*)`/push` -: Push the image `name` on the registry +`POST /images/(name)/push` + +Push the image `name` on the registry > **Example request**: > @@ -745,10 +760,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Tag an image into a repository](#id25) +### Tag an image into a repository - `POST /images/`(*name*)`/tag` -: Tag the image `name` into a repository +`POST /images/(name)/tag` + +Tag the image `name` into a repository **Example request**: @@ -773,10 +789,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### [Remove an image](#id26) +### Remove an image - `DELETE /images/`(*name*) -: Remove the image `name` from the filesystem +`DELETE /images/(name)` + +Remove the image `name` from the filesystem **Example request**: @@ -800,10 +817,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### [Search images](#id27) +### Search images - `GET /images/search` -: Search for an image in the docker index +`GET /images/search` + +Search for an image in the docker index **Example request**: @@ -833,12 +851,13 @@ page_keywords: API, Docker, rcli, REST, documentation :statuscode 200: no error :statuscode 500: server error -### [2.3 Misc](#id28) +## 2.3 Misc -#### [Build an image from Dockerfile via stdin](#id29) +### Build an image from Dockerfile via stdin - `POST /build` -: Build an image from Dockerfile via stdin +`POST /build` + +Build an image from Dockerfile via stdin **Example request**: @@ -873,10 +892,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Check auth configuration](#id30) +### Check auth configuration - `POST /auth` -: Get the default username and email +`POST /auth` + +Get the default username and email **Example request**: @@ -899,10 +919,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **204** – no error - **500** – server error -#### [Display system-wide information](#id31) +### Display system-wide information - `GET /info` -: Display system-wide information +`GET /info` + +Display system-wide information **Example request**: @@ -931,10 +952,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Show the docker version information](#id32) +### Show the docker version information - `GET /version` -: Show the docker version information +`GET /version` + +Show the docker version information **Example request**: @@ -956,10 +978,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Create a new image from a container’s changes](#id33) +### Create a new image from a container's changes - `POST /commit` -: Create a new image from a container’s changes +`POST /commit` + +Create a new image from a container's changes **Example request**: @@ -987,7 +1010,7 @@ page_keywords: API, Docker, rcli, REST, documentation - **tag** – tag - **m** – commit message - **author** – author (eg. 
"John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>") + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") Status Codes: @@ -995,11 +1018,12 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Monitor Docker’s events](#id34) +### Monitor Docker's events - `GET /events` -: Get events from docker, either in real time via streaming, or via - polling (using since) +`GET /events` + +Get events from docker, either in real time via streaming, or via +polling (using since) **Example request**: @@ -1026,33 +1050,33 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -## [3. Going further](#id35) +# 3. Going further -### [3.1 Inside ‘docker run’](#id36) +## 3.1 Inside `docker run` -Here are the steps of ‘docker run’ : +Here are the steps of `docker run` : -- Create the container + - Create the container -- If the status code is 404, it means the image doesn’t exists: - : - Try to pull it - - Then retry to create the container + - If the status code is 404, it means the image doesn't exists: + - Try to pull it + - Then retry to create the container -- Start the container + - Start the container -- If you are not in detached mode: - : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 + - If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 -- If in detached mode or only stdin is attached: - : - Display the container’s id + - If in detached mode or only stdin is attached: + - Display the container's id -### [3.2 Hijacking](#id37) +## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. -### [3.3 CORS Requests](#id38) +## 3.3 CORS Requests To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.4.md b/docs/sources/reference/api/archive/docker_remote_api_v1.4.md index 32733708b3..a7d52de871 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.4.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.4.md @@ -2,24 +2,25 @@ page_title: Remote API v1.4 page_description: API Documentation for Docker page_keywords: API, Docker, rcli, REST, documentation -# [Docker Remote API v1.4](#id1) +# Docker Remote API v1.4 -## [1. Brief introduction](#id2) +# 1. Brief introduction -- The Remote API is replacing rcli -- Default port in the docker daemon is 4243 -- The API tends to be REST, but for some complex commands, like attach - or pull, the HTTP connection is hijacked to transport stdout stdin - and stderr +- The Remote API is replacing rcli +- Default port in the docker daemon is 4243 +- The API tends to be REST, but for some complex commands, like attach + or pull, the HTTP connection is hijacked to transport stdout stdin + and stderr -## [2. Endpoints](#id3) +# 2. 
Endpoints -### [2.1 Containers](#id4) +## 2.1 Containers -#### [List containers](#id5) +### List containers - `GET /containers/json` -: List containers +`GET /containers/json` + +List containers **Example request**: @@ -94,10 +95,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create a container](#id6) +### Create a container - `POST /containers/create` -: Create a container +`POST /containers/create` + +Create a container **Example request**: @@ -143,7 +145,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **config** – the container’s configuration + - **config** – the container's configuration Status Codes: @@ -152,10 +154,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **406** – impossible to attach (container not running) - **500** – server error -#### [Inspect a container](#id7) +### Inspect a container - `GET /containers/`(*id*)`/json` -: Return low-level information on the container `id` +`GET /containers/(id)/json` + +Return low-level information on the container `id` **Example request**: @@ -222,10 +225,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict between containers and images - **500** – server error -#### [List processes running inside a container](#id8) +### List processes running inside a container - `GET /containers/`(*id*)`/top` -: List processes running inside the container `id` +`GET /containers/(id)/top` + +List processes running inside the container `id` **Example request**: @@ -260,7 +264,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **ps\_args** – ps arguments to use (eg. aux) + - **ps_args** – ps arguments to use (eg. aux) Status Codes: @@ -268,10 +272,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Inspect changes on a container’s filesystem](#id9) +### Inspect changes on a container's filesystem - `GET /containers/`(*id*)`/changes` -: Inspect changes on container `id` ‘s filesystem +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem **Example request**: @@ -303,10 +308,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Export a container](#id10) +### Export a container - `GET /containers/`(*id*)`/export` -: Export the contents of container `id` +`GET /containers/(id)/export` + +Export the contents of container `id` **Example request**: @@ -325,10 +331,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Start a container](#id11) +### Start a container - `POST /containers/`(*id*)`/start` -: Start the container `id` +`POST /containers/(id)/start` + +Start the container `id` **Example request**: @@ -349,7 +356,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **hostConfig** – the container’s host configuration (optional) + - **hostConfig** – the container's host configuration (optional) Status Codes: @@ -357,10 +364,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Stop a container](#id12) +### Stop a container - `POST /containers/`(*id*)`/stop` -: Stop the container `id` +`POST /containers/(id)/stop` + +Stop the container `id` **Example request**: @@ -382,10 +390,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Restart a 
container](#id13) +### Restart a container - `POST /containers/`(*id*)`/restart` -: Restart the container `id` +`POST /containers/(id)/restart` + +Restart the container `id` **Example request**: @@ -407,10 +416,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Kill a container](#id14) +### Kill a container - `POST /containers/`(*id*)`/kill` -: Kill the container `id` +`POST /containers/(id)/kill` + +Kill the container `id` **Example request**: @@ -426,10 +436,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Attach to a container](#id15) +### Attach to a container - `POST /containers/`(*id*)`/attach` -: Attach to the container `id` +`POST /containers/(id)/attach` + +Attach to the container `id` **Example request**: @@ -464,11 +475,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Wait a container](#id16) +### Wait a container - `POST /containers/`(*id*)`/wait` -: Block until container `id` stops, then returns - the exit code +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code **Example request**: @@ -487,10 +498,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Remove a container](#id17) +### Remove a container - `DELETE /containers/`(*id*) -: Remove the container `id` from the filesystem +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem **Example request**: @@ -514,10 +526,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Copy files or folders from a container](#id18) +### Copy files or folders from a container - `POST /containers/`(*id*)`/copy` -: Copy files or folders of container `id` +`POST /containers/(id)/copy` + +Copy files or folders of container `id` **Example request**: @@ -541,13 +554,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -### [2.2 Images](#id19) +## 2.2 Images -#### [List Images](#id20) +### List Images - `GET /images/`(*format*) -: List images `format` could be json or viz (json - default) +`GET /images/(format)` + +List images `format` could be json or viz (json default) **Example request**: @@ -617,11 +630,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create an image](#id21) +### Create an image - `POST /images/create` -: Create an image, either by pull it from the registry or by importing - it +`POST /images/create` + +Create an image, either by pull it from the registry or by importing it **Example request**: @@ -652,11 +665,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Insert a file in an image](#id22) +### Insert a file in an image - `POST /images/`(*name*)`/insert` -: Insert a file from `url` in the image - `name` at `path` +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` **Example request**: @@ -677,10 +690,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Inspect an image](#id23) +### Inspect an image - `GET /images/`(*name*)`/json` -: Return low-level information on the image `name` +`GET /images/(name)/json` + +Return low-level 
information on the image `name` **Example request**: @@ -727,10 +741,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict between containers and images - **500** – server error -#### [Get the history of an image](#id24) +### Get the history of an image - `GET /images/`(*name*)`/history` -: Return the history of the image `name` +`GET /images/(name)/history` + +Return the history of the image `name` **Example request**: @@ -760,10 +775,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Push an image on the registry](#id25) +### Push an image on the registry - `POST /images/`(*name*)`/push` -: Push the image `name` on the registry +`POST /images/(name)/push` + +Push the image `name` on the registry **Example request**: @@ -789,10 +805,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error :statuscode 404: no such image :statuscode 500: server error -#### [Tag an image into a repository](#id26) +### Tag an image into a repository - `POST /images/`(*name*)`/tag` -: Tag the image `name` into a repository +`POST /images/(name)/tag` + +Tag the image `name` into a repository **Example request**: @@ -817,10 +834,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### [Remove an image](#id27) +### Remove an image - `DELETE /images/`(*name*) -: Remove the image `name` from the filesystem +`DELETE /images/(name)` + +Remove the image `name` from the filesystem **Example request**: @@ -844,10 +862,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### [Search images](#id28) +### Search images - `GET /images/search` -: Search for an image in the docker index +`GET /images/search` + +Search for an image in the docker index **Example request**: @@ -877,12 +896,13 @@ page_keywords: API, Docker, rcli, REST, documentation :statuscode 200: no error :statuscode 500: server error -### [2.3 Misc](#id29) +## 2.3 Misc -#### [Build an image from Dockerfile via stdin](#id30) +### Build an image from Dockerfile via stdin - `POST /build` -: Build an image from Dockerfile via stdin +`POST /build` + +Build an image from Dockerfile via stdin **Example request**: @@ -918,10 +938,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Check auth configuration](#id31) +### Check auth configuration - `POST /auth` -: Get the default username and email +`POST /auth` + +Get the default username and email **Example request**: @@ -945,10 +966,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **204** – no error - **500** – server error -#### [Display system-wide information](#id32) +### Display system-wide information - `GET /info` -: Display system-wide information +`GET /info` + +Display system-wide information **Example request**: @@ -975,35 +997,38 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Show the docker version information](#id33) +### Show the docker version information - `GET /version` -: Show the docker version information - > - > **Example request**: - > - > GET /version HTTP/1.1 - > - > **Example response**: - > - > HTTP/1.1 200 OK - > Content-Type: application/json - > - > { - > "Version":"0.2.2", - > "GitCommit":"5a2a5cc+CHANGES", - > "GoVersion":"go1.0.3" - > } +`GET /version` + +Show the docker version information + + + **Example request**: + + GET 
/version HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version":"0.2.2", + "GitCommit":"5a2a5cc+CHANGES", + "GoVersion":"go1.0.3" + } Status Codes: - **200** – no error - **500** – server error -#### [Create a new image from a container’s changes](#id34) +### Create a new image from a container's changes - `POST /commit` -: Create a new image from a container’s changes +`POST /commit` + +Create a new image from a container's changes **Example request**: @@ -1031,7 +1056,7 @@ page_keywords: API, Docker, rcli, REST, documentation - **tag** – tag - **m** – commit message - **author** – author (eg. "John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>") + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") Status Codes: @@ -1039,11 +1064,12 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Monitor Docker’s events](#id35) +### Monitor Docker's events - `GET /events` -: Get events from docker, either in real time via streaming, or via - polling (using since) +`GET /events` + +Get events from docker, either in real time via streaming, or via +polling (using since) **Example request**: @@ -1070,33 +1096,33 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -## [3. Going further](#id36) +# 3. Going further -### [3.1 Inside ‘docker run’](#id37) +## 3.1 Inside `docker run` -Here are the steps of ‘docker run’ : +Here are the steps of `docker run` : -- Create the container + - Create the container -- If the status code is 404, it means the image doesn’t exists: - : - Try to pull it - - Then retry to create the container + - If the status code is 404, it means the image doesn't exists: + - Try to pull it + - Then retry to create the container -- Start the container + - Start the container -- If you are not in detached mode: - : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 + - If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 -- If in detached mode or only stdin is attached: - : - Display the container’s id + - If in detached mode or only stdin is attached: + - Display the container's id -### [3.2 Hijacking](#id38) +## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. -### [3.3 CORS Requests](#id39) +## 3.3 CORS Requests To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.5.md b/docs/sources/reference/api/archive/docker_remote_api_v1.5.md index adef571b3f..c9fd854f44 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.5.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.5.md @@ -2,24 +2,25 @@ page_title: Remote API v1.5 page_description: API Documentation for Docker page_keywords: API, Docker, rcli, REST, documentation -# [Docker Remote API v1.5](#id1) +# Docker Remote API v1.5 -## [1. Brief introduction](#id2) +# 1. 
Brief introduction -- The Remote API is replacing rcli -- Default port in the docker daemon is 4243 -- The API tends to be REST, but for some complex commands, like attach - or pull, the HTTP connection is hijacked to transport stdout stdin - and stderr +- The Remote API is replacing rcli +- Default port in the docker daemon is 4243 +- The API tends to be REST, but for some complex commands, like attach + or pull, the HTTP connection is hijacked to transport stdout stdin + and stderr -## [2. Endpoints](#id3) +# 2. Endpoints -### [2.1 Containers](#id4) +## 2.1 Containers -#### [List containers](#id5) +### List containers - `GET /containers/json` -: List containers +`GET /containers/json` + +List containers **Example request**: @@ -94,10 +95,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create a container](#id6) +### Create a container - `POST /containers/create` -: Create a container +`POST /containers/create` + +Create a container **Example request**: @@ -142,7 +144,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **config** – the container’s configuration + - **config** – the container's configuration Status Codes: @@ -151,10 +153,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **406** – impossible to attach (container not running) - **500** – server error -#### [Inspect a container](#id7) +### Inspect a container - `GET /containers/`(*id*)`/json` -: Return low-level information on the container `id` +`GET /containers/(id)/json` + +Return low-level information on the container `id` **Example request**: @@ -219,10 +222,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [List processes running inside a container](#id8) +### List processes running inside a container - `GET /containers/`(*id*)`/top` -: List processes running inside the container `id` +`GET /containers/(id)/top` + +List processes running inside the container `id` **Example request**: @@ -257,7 +261,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **ps\_args** – ps arguments to use (eg. aux) + - **ps_args** – ps arguments to use (eg. 
aux) Status Codes: @@ -265,10 +269,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Inspect changes on a container’s filesystem](#id9) +### Inspect changes on a container's filesystem - `GET /containers/`(*id*)`/changes` -: Inspect changes on container `id` ‘s filesystem +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem **Example request**: @@ -300,10 +305,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Export a container](#id10) +### Export a container - `GET /containers/`(*id*)`/export` -: Export the contents of container `id` +`GET /containers/(id)/export` + +Export the contents of container `id` **Example request**: @@ -322,10 +328,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Start a container](#id11) +### Start a container - `POST /containers/`(*id*)`/start` -: Start the container `id` +`POST /containers/(id)/start` + +Start the container `id` **Example request**: @@ -346,7 +353,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **hostConfig** – the container’s host configuration (optional) + - **hostConfig** – the container's host configuration (optional) Status Codes: @@ -354,10 +361,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Stop a container](#id12) +### Stop a container - `POST /containers/`(*id*)`/stop` -: Stop the container `id` +`POST /containers/(id)/stop` + +Stop the container `id` **Example request**: @@ -379,10 +387,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Restart a container](#id13) +### Restart a container - `POST /containers/`(*id*)`/restart` -: Restart the container `id` +`POST /containers/(id)/restart` + +Restart the container `id` **Example request**: @@ -404,10 +413,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Kill a container](#id14) +### Kill a container - `POST /containers/`(*id*)`/kill` -: Kill the container `id` +`POST /containers/(id)/kill` + +Kill the container `id` **Example request**: @@ -423,10 +433,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Attach to a container](#id15) +### Attach to a container - `POST /containers/`(*id*)`/attach` -: Attach to the container `id` +`POST /containers/(id)/attach` + +Attach to the container `id` **Example request**: @@ -461,11 +472,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Wait a container](#id16) +### Wait a container - `POST /containers/`(*id*)`/wait` -: Block until container `id` stops, then returns - the exit code +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code **Example request**: @@ -484,10 +495,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Remove a container](#id17) +### Remove a container - `DELETE /containers/`(*id*) -: Remove the container `id` from the filesystem +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem **Example request**: @@ -511,10 +523,11 @@ page_keywords: API, Docker, rcli, REST, documentation - 
**404** – no such container - **500** – server error -#### [Copy files or folders from a container](#id18) +### Copy files or folders from a container - `POST /containers/`(*id*)`/copy` -: Copy files or folders of container `id` +`POST /containers/(id)/copy` + +Copy files or folders of container `id` **Example request**: @@ -538,13 +551,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -### [2.2 Images](#id19) +## 2.2 Images -#### [List Images](#id20) +### List Images - `GET /images/`(*format*) -: List images `format` could be json or viz (json - default) +`GET /images/(format)` + +List images `format` could be json or viz (json default) **Example request**: @@ -614,11 +627,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create an image](#id21) +### Create an image - `POST /images/create` -: Create an image, either by pull it from the registry or by importing - it +`POST /images/create` + +Create an image, either by pull it from the registry or by importing it **Example request**: @@ -653,11 +666,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Insert a file in an image](#id22) +### Insert a file in an image - `POST /images/`(*name*)`/insert` -: Insert a file from `url` in the image - `name` at `path` +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` **Example request**: @@ -678,10 +691,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Inspect an image](#id23) +### Inspect an image - `GET /images/`(*name*)`/json` -: Return low-level information on the image `name` +`GET /images/(name)/json` + +Return low-level information on the image `name` **Example request**: @@ -727,10 +741,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Get the history of an image](#id24) +### Get the history of an image - `GET /images/`(*name*)`/history` -: Return the history of the image `name` +`GET /images/(name)/history` + +Return the history of the image `name` **Example request**: @@ -760,10 +775,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Push an image on the registry](#id25) +### Push an image on the registry - `POST /images/`(*name*)`/push` -: Push the image `name` on the registry +`POST /images/(name)/push` + +Push the image `name` on the registry **Example request**: @@ -794,10 +810,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Tag an image into a repository](#id26) +### Tag an image into a repository - `POST /images/`(*name*)`/tag` -: Tag the image `name` into a repository +`POST /images/(name)/tag` + +Tag the image `name` into a repository **Example request**: @@ -822,10 +839,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### [Remove an image](#id27) +### Remove an image - `DELETE /images/`(*name*) -: Remove the image `name` from the filesystem +`DELETE /images/(name)` + +Remove the image `name` from the filesystem **Example request**: @@ -849,10 +867,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### [Search images](#id28) +### Search images - `GET /images/search` -: Search for 
an image in the docker index +`GET /images/search` + +Search for an image in the docker index **Example request**: @@ -889,12 +908,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -### [2.3 Misc](#id29) +## 2.3 Misc -#### [Build an image from Dockerfile via stdin](#id30) +### Build an image from Dockerfile via stdin - `POST /build` -: Build an image from Dockerfile via stdin +`POST /build` + +Build an image from Dockerfile via stdin **Example request**: @@ -931,10 +951,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Check auth configuration](#id31) +### Check auth configuration - `POST /auth` -: Get the default username and email +`POST /auth` + +Get the default username and email **Example request**: @@ -958,10 +979,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **204** – no error - **500** – server error -#### [Display system-wide information](#id32) +### Display system-wide information - `GET /info` -: Display system-wide information +`GET /info` + +Display system-wide information **Example request**: @@ -988,10 +1010,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Show the docker version information](#id33) +### Show the docker version information - `GET /version` -: Show the docker version information +`GET /version` + +Show the docker version information **Example request**: @@ -1013,10 +1036,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Create a new image from a container’s changes](#id34) +### Create a new image from a container's changes - `POST /commit` -: Create a new image from a container’s changes +`POST /commit` + +Create a new image from a container's changes **Example request**: @@ -1044,7 +1068,7 @@ page_keywords: API, Docker, rcli, REST, documentation - **tag** – tag - **m** – commit message - **author** – author (eg. "John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>") + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") Status Codes: @@ -1052,11 +1076,12 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Monitor Docker’s events](#id35) +### Monitor Docker's events - `GET /events` -: Get events from docker, either in real time via streaming, or via - polling (using since) +`GET /events` + +Get events from docker, either in real time via streaming, or via +polling (using since) **Example request**: @@ -1083,28 +1108,28 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -## [3. Going further](#id36) +# 3. 
Going further -### [3.1 Inside ‘docker run’](#id37) +## 3.1 Inside `docker run` -Here are the steps of ‘docker run’ : +Here are the steps of `docker run`: -- Create the container -- If the status code is 404, it means the image doesn’t exists: \* Try - to pull it \* Then retry to create the container -- Start the container -- If you are not in detached mode: \* Attach to the container, using - logs=1 (to have stdout and stderr from the container’s start) and - stream=1 -- If in detached mode or only stdin is attached: \* Display the - container’s id + - Create the container + - If the status code is 404, it means the image doesn't exists: + Try to pull it - Then retry to create the container + - Start the container + - If you are not in detached mode: + Attach to the container, using logs=1 (to have stdout and stderr + from the container's start) and stream=1 + - If in detached mode or only stdin is attached: + Display the container's id -### [3.2 Hijacking](#id38) +## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. -### [3.3 CORS Requests](#id39) +## 3.3 CORS Requests To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.6.md b/docs/sources/reference/api/archive/docker_remote_api_v1.6.md index 5bd0e46d50..47c2b82e27 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.6.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.6.md @@ -2,27 +2,27 @@ page_title: Remote API v1.6 page_description: API Documentation for Docker page_keywords: API, Docker, rcli, REST, documentation -# [Docker Remote API v1.6](#id1) +# Docker Remote API v1.6 -## [1. Brief introduction](#id2) +# 1. Brief introduction -- The Remote API has replaced rcli -- The daemon listens on `unix:///var/run/docker.sock` -, but you can [*Bind Docker to another host/port or a Unix - socket*](../../../../use/basics/#bind-docker). -- The API tends to be REST, but for some complex commands, like - `attach` or `pull`, the HTTP - connection is hijacked to transport `stdout, stdin` - and `stderr` + - The Remote API has replaced rcli + - The daemon listens on `unix:///var/run/docker.sock` but you can + [*Bind Docker to another host/port or a Unix socket*]( + ../../../use/basics/#bind-docker). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` + and `stderr` -## [2. Endpoints](#id3) +# 2. 
Endpoints -### [2.1 Containers](#id4) +## 2.1 Containers -#### [List containers](#id5) +### List containers - `GET /containers/json` -: List containers +`GET /containers/json` + +List containers **Example request**: @@ -97,10 +97,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create a container](#id6) +### Create a container - `POST /containers/create` -: Create a container +`POST /containers/create` + +Create a container **Example request**: @@ -144,7 +145,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **config** – the container’s configuration + - **config** – the container's configuration Query Parameters: @@ -202,10 +203,11 @@ page_keywords: API, Docker, rcli, REST, documentation **Now you can ssh into your new container on port 11022.** -#### [Inspect a container](#id7) +### Inspect a container - `GET /containers/`(*id*)`/json` -: Return low-level information on the container `id` +`GET /containers/(id)/json` + +Return low-level information on the container `id` **Example request**: @@ -271,10 +273,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [List processes running inside a container](#id8) +### List processes running inside a container - `GET /containers/`(*id*)`/top` -: List processes running inside the container `id` +`GET /containers/(id)/top` + +List processes running inside the container `id` **Example request**: @@ -309,7 +312,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **ps\_args** – ps arguments to use (eg. aux) + - **ps_args** – ps arguments to use (eg. aux) Status Codes: @@ -317,10 +320,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Inspect changes on a container’s filesystem](#id9) +### Inspect changes on a container's filesystem - `GET /containers/`(*id*)`/changes` -: Inspect changes on container `id` ‘s filesystem +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem **Example request**: @@ -352,10 +356,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Export a container](#id10) +### Export a container - `GET /containers/`(*id*)`/export` -: Export the contents of container `id` +`GET /containers/(id)/export` + +Export the contents of container `id` **Example request**: @@ -374,10 +379,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Start a container](#id11) +### Start a container - `POST /containers/`(*id*)`/start` -: Start the container `id` +`POST /containers/(id)/start` + +Start the container `id` **Example request**: @@ -403,7 +409,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **hostConfig** – the container’s host configuration (optional) + - **hostConfig** – the container's host configuration (optional) Status Codes: @@ -411,10 +417,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Stop a container](#id12) +### Stop a container - `POST /containers/`(*id*)`/stop` -: Stop the container `id` +`POST /containers/(id)/stop` + +Stop the container `id` **Example request**: @@ -436,10 +443,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Restart a container](#id13) +### Restart a 
container - `POST /containers/`(*id*)`/restart` -: Restart the container `id` +`POST /containers/(id)/restart` + +Restart the container `id` **Example request**: @@ -461,10 +469,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Kill a container](#id14) +### Kill a container - `POST /containers/`(*id*)`/kill` -: Kill the container `id` +`POST /containers/(id)/kill` + +Kill the container `id` **Example request**: @@ -488,10 +497,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Attach to a container](#id15) +### Attach to a container - `POST /containers/`(*id*)`/attach` -: Attach to the container `id` +`POST /containers/(id)/attach` + +Attach to the container `id` **Example request**: @@ -530,8 +540,8 @@ page_keywords: API, Docker, rcli, REST, documentation When using the TTY setting is enabled in [`POST /containers/create` -](../../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), - the stream is the raw data from the process PTY and client’s stdin. + ](../../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. When the TTY is disabled, then the stream is multiplexed to separate stdout and stderr. @@ -570,11 +580,11 @@ page_keywords: API, Docker, rcli, REST, documentation 4. Read the extracted size and output it on the correct output 5. Goto 1) -#### [Wait a container](#id16) +### Wait a container - `POST /containers/`(*id*)`/wait` -: Block until container `id` stops, then returns - the exit code +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code **Example request**: @@ -593,10 +603,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Remove a container](#id17) +### Remove a container - `DELETE /containers/`(*id*) -: Remove the container `id` from the filesystem +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem **Example request**: @@ -620,10 +631,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Copy files or folders from a container](#id18) +### Copy files or folders from a container - `POST /containers/`(*id*)`/copy` -: Copy files or folders of container `id` +`POST /containers/(id)/copy` + +Copy files or folders of container `id` **Example request**: @@ -647,13 +659,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -### [2.2 Images](#id19) +## 2.2 Images -#### [List Images](#id20) +### List Images - `GET /images/`(*format*) -: List images `format` could be json or viz (json - default) +`GET /images/(format)` + +List images `format` could be json or viz (json default) **Example request**: @@ -723,11 +735,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create an image](#id21) +### Create an image - `POST /images/create` -: Create an image, either by pull it from the registry or by importing - it +`POST /images/create` + +Create an image, either by pull it from the registry or by importing it **Example request**: @@ -762,11 +774,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Insert a file in an image](#id22) +### Insert a file 
in an image - `POST /images/`(*name*)`/insert` -: Insert a file from `url` in the image - `name` at `path` +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` **Example request**: @@ -787,10 +799,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Inspect an image](#id23) +### Inspect an image - `GET /images/`(*name*)`/json` -: Return low-level information on the image `name` +`GET /images/(name)/json` + +Return low-level information on the image `name` **Example request**: @@ -836,10 +849,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Get the history of an image](#id24) +### Get the history of an image - `GET /images/`(*name*)`/history` -: Return the history of the image `name` +`GET /images/(name)/history` + +Return the history of the image `name` **Example request**: @@ -869,10 +883,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Push an image on the registry](#id25) +### Push an image on the registry - `POST /images/`(*name*)`/push` -: Push the image `name` on the registry +`POST /images/(name)/push` + +Push the image `name` on the registry **Example request**: @@ -900,10 +915,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error :statuscode 404: no such image :statuscode 500: server error -#### [Tag an image into a repository](#id26) +### Tag an image into a repository - `POST /images/`(*name*)`/tag` -: Tag the image `name` into a repository +`POST /images/(name)/tag` + +Tag the image `name` into a repository **Example request**: @@ -928,10 +944,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### [Remove an image](#id27) +### Remove an image - `DELETE /images/`(*name*) -: Remove the image `name` from the filesystem +`DELETE /images/(name)` + +Remove the image `name` from the filesystem **Example request**: @@ -955,10 +972,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### [Search images](#id28) +### Search images - `GET /images/search` -: Search for an image in the docker index +`GET /images/search` + +Search for an image in the docker index **Example request**: @@ -988,12 +1006,13 @@ page_keywords: API, Docker, rcli, REST, documentation :statuscode 200: no error :statuscode 500: server error -### [2.3 Misc](#id29) +## 2.3 Misc -#### [Build an image from Dockerfile via stdin](#id30) +### Build an image from Dockerfile via stdin - `POST /build` -: Build an image from Dockerfile via stdin +`POST /build` + +Build an image from Dockerfile via stdin **Example request**: @@ -1029,10 +1048,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Check auth configuration](#id31) +### Check auth configuration - `POST /auth` -: Get the default username and email +`POST /auth` + +Get the default username and email **Example request**: @@ -1056,10 +1076,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **204** – no error - **500** – server error -#### [Display system-wide information](#id32) +### Display system-wide information - `GET /info` -: Display system-wide information +`GET /info` + +Display system-wide information **Example request**: @@ -1086,10 +1107,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** 
– server error -#### [Show the docker version information](#id33) +### Show the docker version information - `GET /version` -: Show the docker version information +`GET /version` + +Show the docker version information **Example request**: @@ -1111,10 +1133,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Create a new image from a container’s changes](#id34) +### Create a new image from a container's changes - `POST /commit` -: Create a new image from a container’s changes +`POST /commit` + +Create a new image from a container's changes **Example request**: @@ -1142,7 +1165,7 @@ page_keywords: API, Docker, rcli, REST, documentation - **tag** – tag - **m** – commit message - **author** – author (eg. "John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>") + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") Status Codes: @@ -1150,11 +1173,12 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Monitor Docker’s events](#id35) +### Monitor Docker's events - `GET /events` -: Get events from docker, either in real time via streaming, or via - polling (using since) +`GET /events` + +Get events from docker, either in real time via streaming, or via +polling (using since) **Example request**: @@ -1181,33 +1205,33 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -## [3. Going further](#id36) +# 3. Going further -### [3.1 Inside ‘docker run’](#id37) +## 3.1 Inside `docker run` -Here are the steps of ‘docker run’ : +Here are the steps of `docker run` : - Create the container -- If the status code is 404, it means the image doesn’t exists: - : - Try to pull it +- If the status code is 404, it means the image doesn't exists: + - Try to pull it - Then retry to create the container - Start the container - If you are not in detached mode: - : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 - If in detached mode or only stdin is attached: - : - Display the container’s id + - Display the container's id -### [3.2 Hijacking](#id38) +## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. -### [3.3 CORS Requests](#id39) +## 3.3 CORS Requests To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.7.md b/docs/sources/reference/api/archive/docker_remote_api_v1.7.md index ac02aa5d0e..ccc973925a 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.7.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.7.md @@ -2,27 +2,27 @@ page_title: Remote API v1.7 page_description: API Documentation for Docker page_keywords: API, Docker, rcli, REST, documentation -# [Docker Remote API v1.7](#id1) +# Docker Remote API v1.7 -## [1. Brief introduction](#id2) +# 1. Brief introduction -- The Remote API has replaced rcli -- The daemon listens on `unix:///var/run/docker.sock` -, but you can [*Bind Docker to another host/port or a Unix - socket*](../../../../use/basics/#bind-docker). 
-- The API tends to be REST, but for some complex commands, like - `attach` or `pull`, the HTTP - connection is hijacked to transport `stdout, stdin` - and `stderr` + - The Remote API has replaced rcli + - The daemon listens on `unix:///var/run/docker.sock` but you can + [*Bind Docker to another host/port or a Unix socket*]( + ../../../use/basics/#bind-docker). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` + and `stderr` -## [2. Endpoints](#id3) +# 2. Endpoints -### [2.1 Containers](#id4) +## 2.1 Containers -#### [List containers](#id5) +### List containers - `GET /containers/json` -: List containers +`GET /containers/json` + +List containers **Example request**: @@ -97,10 +97,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create a container](#id6) +### Create a container - `POST /containers/create` -: Create a container +`POST /containers/create` + +Create a container **Example request**: @@ -149,7 +150,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **config** – the container’s configuration + - **config** – the container's configuration Status Codes: @@ -158,10 +159,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **406** – impossible to attach (container not running) - **500** – server error -#### [Inspect a container](#id7) +### Inspect a container - `GET /containers/`(*id*)`/json` -: Return low-level information on the container `id` +`GET /containers/(id)/json` + +Return low-level information on the container `id` **Example request**: @@ -227,10 +229,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [List processes running inside a container](#id8) +### List processes running inside a container - `GET /containers/`(*id*)`/top` -: List processes running inside the container `id` +`GET /containers/(id)/top` + +List processes running inside the container `id` **Example request**: @@ -265,7 +268,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **ps\_args** – ps arguments to use (eg. aux) + - **ps_args** – ps arguments to use (eg. 
aux) Status Codes: @@ -273,10 +276,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Inspect changes on a container’s filesystem](#id9) +### Inspect changes on a container's filesystem - `GET /containers/`(*id*)`/changes` -: Inspect changes on container `id` ‘s filesystem +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem **Example request**: @@ -308,10 +312,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Export a container](#id10) +### Export a container - `GET /containers/`(*id*)`/export` -: Export the contents of container `id` +`GET /containers/(id)/export` + +Export the contents of container `id` **Example request**: @@ -330,10 +335,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Start a container](#id11) +### Start a container - `POST /containers/`(*id*)`/start` -: Start the container `id` +`POST /containers/(id)/start` + +Start the container `id` **Example request**: @@ -360,7 +366,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **hostConfig** – the container’s host configuration (optional) + - **hostConfig** – the container's host configuration (optional) Status Codes: @@ -368,10 +374,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Stop a container](#id12) +### Stop a container - `POST /containers/`(*id*)`/stop` -: Stop the container `id` +`POST /containers/(id)/stop` + +Stop the container `id` **Example request**: @@ -393,10 +400,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Restart a container](#id13) +### Restart a container - `POST /containers/`(*id*)`/restart` -: Restart the container `id` +`POST /containers/(id)/restart` + +Restart the container `id` **Example request**: @@ -418,10 +426,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Kill a container](#id14) +### Kill a container - `POST /containers/`(*id*)`/kill` -: Kill the container `id` +`POST /containers/(id)/kill` + +Kill the container `id` **Example request**: @@ -437,10 +446,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Attach to a container](#id15) +### Attach to a container - `POST /containers/`(*id*)`/attach` -: Attach to the container `id` +`POST /containers/(id)/attach` + +Attach to the container `id` **Example request**: @@ -479,8 +489,8 @@ page_keywords: API, Docker, rcli, REST, documentation When using the TTY setting is enabled in [`POST /containers/create` -](../../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), - the stream is the raw data from the process PTY and client’s stdin. + ](../../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. When the TTY is disabled, then the stream is multiplexed to separate stdout and stderr. @@ -519,11 +529,11 @@ page_keywords: API, Docker, rcli, REST, documentation 4. Read the extracted size and output it on the correct output 5. 
Goto 1) -#### [Wait a container](#id16) +### Wait a container - `POST /containers/`(*id*)`/wait` -: Block until container `id` stops, then returns - the exit code +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code **Example request**: @@ -542,10 +552,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Remove a container](#id17) +### Remove a container - `DELETE /containers/`(*id*) -: Remove the container `id` from the filesystem +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem **Example request**: @@ -569,10 +580,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Copy files or folders from a container](#id18) +### Copy files or folders from a container - `POST /containers/`(*id*)`/copy` -: Copy files or folders of container `id` +`POST /containers/(id)/copy` + +Copy files or folders of container `id` **Example request**: @@ -596,12 +608,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -### [2.2 Images](#id19) +## 2.2 Images -#### [List Images](#id20) +### List Images - `GET /images/json` -: **Example request**: +`GET /images/json` + +**Example request**: GET /images/json?all=0 HTTP/1.1 @@ -635,11 +648,11 @@ page_keywords: API, Docker, rcli, REST, documentation } ] -#### [Create an image](#id21) +### Create an image - `POST /images/create` -: Create an image, either by pull it from the registry or by importing - it +`POST /images/create` + +Create an image, either by pull it from the registry or by importing it **Example request**: @@ -680,11 +693,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Insert a file in an image](#id22) +### Insert a file in an image - `POST /images/`(*name*)`/insert` -: Insert a file from `url` in the image - `name` at `path` +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` **Example request**: @@ -705,10 +718,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Inspect an image](#id23) +### Inspect an image - `GET /images/`(*name*)`/json` -: Return low-level information on the image `name` +`GET /images/(name)/json` + +Return low-level information on the image `name` **Example request**: @@ -754,10 +768,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Get the history of an image](#id24) +### Get the history of an image - `GET /images/`(*name*)`/history` -: Return the history of the image `name` +`GET /images/(name)/history` + +Return the history of the image `name` **Example request**: @@ -787,10 +802,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Push an image on the registry](#id25) +### Push an image on the registry - `POST /images/`(*name*)`/push` -: Push the image `name` on the registry +`POST /images/(name)/push` + +Push the image `name` on the registry **Example request**: @@ -825,10 +841,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Tag an image into a repository](#id26) +### Tag an image into a repository - `POST /images/`(*name*)`/tag` -: Tag the image `name` into a repository +`POST /images/(name)/tag` + +Tag the 
image `name` into a repository **Example request**: @@ -853,10 +870,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### [Remove an image](#id27) +### Remove an image - `DELETE /images/`(*name*) -: Remove the image `name` from the filesystem +`DELETE /images/(name)` + +Remove the image `name` from the filesystem **Example request**: @@ -880,14 +898,15 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### [Search images](#id28) +### Search images - `GET /images/search` -: Search for an image in the docker index. +`GET /images/search` + +Search for an image in the docker index. > **Note**: > The response keys have changed from API v1.6 to reflect the JSON -> sent by the registry server to the docker daemon’s request. +> sent by the registry server to the docker daemon's request. **Example request**: @@ -934,12 +953,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -### [2.3 Misc](#id29) +## 2.3 Misc -#### [Build an image from Dockerfile via stdin](#id30) +### Build an image from Dockerfile via stdin - `POST /build` -: Build an image from Dockerfile via stdin +`POST /build` + +Build an image from Dockerfile via stdin **Example request**: @@ -958,7 +978,7 @@ page_keywords: API, Docker, rcli, REST, documentation following algorithms: identity (no compression), gzip, bzip2, xz. The archive must include a file called `Dockerfile` - at its root. It may include any number of other files, + at its root. It may include any number of other files, which will be accessible in the build context (See the [*ADD build command*](../../../builder/#dockerbuilder)). @@ -983,10 +1003,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Check auth configuration](#id31) +### Check auth configuration - `POST /auth` -: Get the default username and email +`POST /auth` + +Get the default username and email **Example request**: @@ -1010,10 +1031,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **204** – no error - **500** – server error -#### [Display system-wide information](#id32) +### Display system-wide information - `GET /info` -: Display system-wide information +`GET /info` + +Display system-wide information **Example request**: @@ -1040,10 +1062,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Show the docker version information](#id33) +### Show the docker version information - `GET /version` -: Show the docker version information +`GET /version` + +Show the docker version information **Example request**: @@ -1065,10 +1088,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Create a new image from a container’s changes](#id34) +### Create a new image from a container's changes - `POST /commit` -: Create a new image from a container’s changes +`POST /commit` + +Create a new image from a container's changes **Example request**: @@ -1090,7 +1114,7 @@ page_keywords: API, Docker, rcli, REST, documentation - **tag** – tag - **m** – commit message - **author** – author (eg. "John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>") + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") - **run** – config automatically applied when the image is run. 
(ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) @@ -1100,11 +1124,12 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Monitor Docker’s events](#id35) +### Monitor Docker's events - `GET /events` -: Get events from docker, either in real time via streaming, or via - polling (using since) +`GET /events` + +Get events from docker, either in real time via streaming, or via +polling (using since) **Example request**: @@ -1131,11 +1156,12 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Get a tarball containing all images and tags in a repository](#id36) +### Get a tarball containing all images and tags in a repository - `GET /images/`(*name*)`/get` -: Get a tarball containing all images and metadata for the repository - specified by `name`. +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository +specified by `name`. **Example request** @@ -1152,10 +1178,11 @@ page_keywords: API, Docker, rcli, REST, documentation :statuscode 200: no error :statuscode 500: server error -#### [Load a tarball with a set of images and tags into docker](#id37) +### Load a tarball with a set of images and tags into docker - `POST /images/load` -: Load a set of images and tags into the docker repository. +`POST /images/load` + +Load a set of images and tags into the docker repository. **Example request** @@ -1172,33 +1199,33 @@ page_keywords: API, Docker, rcli, REST, documentation :statuscode 200: no error :statuscode 500: server error -## [3. Going further](#id38) +# 3. Going further -### [3.1 Inside ‘docker run’](#id39) +## 3.1 Inside `docker run` -Here are the steps of ‘docker run’ : +Here are the steps of `docker run` : - Create the container -- If the status code is 404, it means the image doesn’t exists: - : - Try to pull it +- If the status code is 404, it means the image doesn't exists: + - Try to pull it - Then retry to create the container - Start the container - If you are not in detached mode: - : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 - If in detached mode or only stdin is attached: - : - Display the container’s id + - Display the container's id -### [3.2 Hijacking](#id40) +## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. -### [3.3 CORS Requests](#id41) +## 3.3 CORS Requests To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.8.md b/docs/sources/reference/api/archive/docker_remote_api_v1.8.md index eb29699e62..4bc4d01638 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.8.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.8.md @@ -2,27 +2,27 @@ page_title: Remote API v1.8 page_description: API Documentation for Docker page_keywords: API, Docker, rcli, REST, documentation -# [Docker Remote API v1.8](#id1) +# Docker Remote API v1.8 -## [1. Brief introduction](#id2) +# 1. 
Brief introduction -- The Remote API has replaced rcli -- The daemon listens on `unix:///var/run/docker.sock` -, but you can [*Bind Docker to another host/port or a Unix - socket*](../../../../use/basics/#bind-docker). -- The API tends to be REST, but for some complex commands, like - `attach` or `pull`, the HTTP - connection is hijacked to transport `stdout, stdin` - and `stderr` + - The Remote API has replaced rcli + - The daemon listens on `unix:///var/run/docker.sock` but you can + [*Bind Docker to another host/port or a Unix socket*]( + ../../../use/basics/#bind-docker). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` + and `stderr` -## [2. Endpoints](#id3) +# 2. Endpoints -### [2.1 Containers](#id4) +## 2.1 Containers -#### [List containers](#id5) +### List containers - `GET /containers/json` -: List containers +`GET /containers/json` + +List containers **Example request**: @@ -97,10 +97,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### [Create a container](#id6) +### Create a container - `POST /containers/create` -: Create a container +`POST /containers/create` + +Create a container **Example request**: @@ -179,11 +180,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **406** – impossible to attach (container not running) - **500** – server error -#### [Inspect a container](#id7) +### Inspect a container - `GET /containers/`(*id*)`/json` -: Return low-level information on the container `id` +`GET /containers/(id)/json` +Return low-level information on the container `id` **Example request**: @@ -264,10 +265,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [List processes running inside a container](#id8) +### List processes running inside a container - `GET /containers/`(*id*)`/top` -: List processes running inside the container `id` +`GET /containers/(id)/top` + +List processes running inside the container `id` **Example request**: @@ -302,7 +304,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **ps\_args** – ps arguments to use (eg. aux) + - **ps_args** – ps arguments to use (eg. 
aux) Status Codes: @@ -310,10 +312,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Inspect changes on a container’s filesystem](#id9) +### Inspect changes on a container's filesystem - `GET /containers/`(*id*)`/changes` -: Inspect changes on container `id` ‘s filesystem +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem **Example request**: @@ -345,10 +348,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Export a container](#id10) +### Export a container - `GET /containers/`(*id*)`/export` -: Export the contents of container `id` +`GET /containers/(id)/export` + +Export the contents of container `id` **Example request**: @@ -367,10 +371,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Start a container](#id11) +### Start a container - `POST /containers/`(*id*)`/start` -: Start the container `id` +`POST /containers/(id)/start` + +Start the container `id` **Example request**: @@ -411,10 +416,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Stop a container](#id12) +### Stop a container - `POST /containers/`(*id*)`/stop` -: Stop the container `id` +`POST /containers/(id)/stop` + +Stop the container `id` **Example request**: @@ -436,10 +442,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Restart a container](#id13) +### Restart a container - `POST /containers/`(*id*)`/restart` -: Restart the container `id` +`POST /containers/(id)/restart` + +Restart the container `id` **Example request**: @@ -461,10 +468,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Kill a container](#id14) +### Kill a container - `POST /containers/`(*id*)`/kill` -: Kill the container `id` +`POST /containers/(id)/kill` + +Kill the container `id` **Example request**: @@ -480,10 +488,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Attach to a container](#id15) +### Attach to a container - `POST /containers/`(*id*)`/attach` -: Attach to the container `id` +`POST /containers/(id)/attach` + +Attach to the container `id` **Example request**: @@ -522,8 +531,8 @@ page_keywords: API, Docker, rcli, REST, documentation When using the TTY setting is enabled in [`POST /containers/create` -](../../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), - the stream is the raw data from the process PTY and client’s stdin. + ](../../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. When the TTY is disabled, then the stream is multiplexed to separate stdout and stderr. @@ -562,11 +571,11 @@ page_keywords: API, Docker, rcli, REST, documentation 4. Read the extracted size and output it on the correct output 5. 
Goto 1) -#### [Wait a container](#id16) +### Wait a container - `POST /containers/`(*id*)`/wait` -: Block until container `id` stops, then returns - the exit code +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code **Example request**: @@ -585,10 +594,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Remove a container](#id17) +### Remove a container - `DELETE /containers/`(*id*) -: Remove the container `id` from the filesystem +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem **Example request**: @@ -612,10 +622,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Copy files or folders from a container](#id18) +### Copy files or folders from a container - `POST /containers/`(*id*)`/copy` -: Copy files or folders of container `id` +`POST /containers/(id)/copy` + +Copy files or folders of container `id` **Example request**: @@ -639,12 +650,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -### [2.2 Images](#id19) +## 2.2 Images -#### [List Images](#id20) +### List Images - `GET /images/json` -: **Example request**: +`GET /images/json` + +**Example request**: GET /images/json?all=0 HTTP/1.1 @@ -678,11 +690,11 @@ page_keywords: API, Docker, rcli, REST, documentation } ] -#### [Create an image](#id21) +### Create an image - `POST /images/create` -: Create an image, either by pull it from the registry or by importing - it +`POST /images/create` + +Create an image, either by pull it from the registry or by importing it **Example request**: @@ -723,11 +735,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Insert a file in an image](#id22) +### Insert a file in an image - `POST /images/`(*name*)`/insert` -: Insert a file from `url` in the image - `name` at `path` +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` **Example request**: @@ -748,10 +760,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Inspect an image](#id23) +### Inspect an image - `GET /images/`(*name*)`/json` -: Return low-level information on the image `name` +`GET /images/(name)/json` + +Return low-level information on the image `name` **Example request**: @@ -797,10 +810,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Get the history of an image](#id24) +### Get the history of an image - `GET /images/`(*name*)`/history` -: Return the history of the image `name` +`GET /images/(name)/history` + +Return the history of the image `name` **Example request**: @@ -830,10 +844,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Push an image on the registry](#id25) +### Push an image on the registry - `POST /images/`(*name*)`/push` -: Push the image `name` on the registry +`POST /images/(name)/push` + +Push the image `name` on the registry **Example request**: @@ -868,10 +883,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### [Tag an image into a repository](#id26) +### Tag an image into a repository - `POST /images/`(*name*)`/tag` -: Tag the image `name` into a repository +`POST /images/(name)/tag` + +Tag the 
image `name` into a repository **Example request**: @@ -896,10 +912,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### [Remove an image](#id27) +### Remove an image - `DELETE /images/`(*name*) -: Remove the image `name` from the filesystem +`DELETE /images/(name)` + +Remove the image `name` from the filesystem **Example request**: @@ -923,14 +940,15 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### [Search images](#id28) +### Search images - `GET /images/search` -: Search for an image in the docker index. +`GET /images/search` + +Search for an image in the docker index. > **Note**: > The response keys have changed from API v1.6 to reflect the JSON -> sent by the registry server to the docker daemon’s request. +> sent by the registry server to the docker daemon's request. **Example request**: @@ -977,12 +995,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -### [2.3 Misc](#id29) +## 2.3 Misc -#### [Build an image from Dockerfile via stdin](#id30) +### Build an image from Dockerfile via stdin - `POST /build` -: Build an image from Dockerfile via stdin +`POST /build` + +Build an image from Dockerfile via stdin **Example request**: @@ -1003,7 +1022,7 @@ page_keywords: API, Docker, rcli, REST, documentation following algorithms: identity (no compression), gzip, bzip2, xz. The archive must include a file called `Dockerfile` - at its root. It may include any number of other files, + at its root. It may include any number of other files, which will be accessible in the build context (See the [*ADD build command*](../../../builder/#dockerbuilder)). @@ -1029,10 +1048,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Check auth configuration](#id31) +### Check auth configuration - `POST /auth` -: Get the default username and email +`POST /auth` + +Get the default username and email **Example request**: @@ -1056,10 +1076,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **204** – no error - **500** – server error -#### [Display system-wide information](#id32) +### Display system-wide information - `GET /info` -: Display system-wide information +`GET /info` + +Display system-wide information **Example request**: @@ -1086,10 +1107,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Show the docker version information](#id33) +### Show the docker version information - `GET /version` -: Show the docker version information +`GET /version` + +Show the docker version information **Example request**: @@ -1111,10 +1133,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Create a new image from a container’s changes](#id34) +### Create a new image from a container's changes - `POST /commit` -: Create a new image from a container’s changes +`POST /commit` + +Create a new image from a container's changes **Example request**: @@ -1136,7 +1159,7 @@ page_keywords: API, Docker, rcli, REST, documentation - **tag** – tag - **m** – commit message - **author** – author (eg. "John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>") + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") - **run** – config automatically applied when the image is run. 
(ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) @@ -1146,11 +1169,12 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### [Monitor Docker’s events](#id35) +### Monitor Docker's events - `GET /events` -: Get events from docker, either in real time via streaming, or via - polling (using since) +`GET /events` + +Get events from docker, either in real time via streaming, +or via polling (using since) **Example request**: @@ -1177,11 +1201,12 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Get a tarball containing all images and tags in a repository](#id36) +### Get a tarball containing all images and tags in a repository - `GET /images/`(*name*)`/get` -: Get a tarball containing all images and metadata for the repository - specified by `name`. +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository +specified by `name`. **Example request** @@ -1199,10 +1224,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### [Load a tarball with a set of images and tags into docker](#id37) +### Load a tarball with a set of images and tags into docker - `POST /images/load` -: Load a set of images and tags into the docker repository. +`POST /images/load` + +Load a set of images and tags into the docker repository. **Example request** @@ -1219,33 +1245,33 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -## [3. Going further](#id38) +# 3. Going further -### [3.1 Inside ‘docker run’](#id39) +## 3.1 Inside `docker run` -Here are the steps of ‘docker run’ : +Here are the steps of `docker run`: -- Create the container + - Create the container -- If the status code is 404, it means the image doesn’t exists: - : - Try to pull it - - Then retry to create the container + - If the status code is 404, it means the image doesn't exists: + - Try to pull it + - Then retry to create the container -- Start the container + - Start the container -- If you are not in detached mode: - : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 + - If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 -- If in detached mode or only stdin is attached: - : - Display the container’s id + - If in detached mode or only stdin is attached: + - Display the container's id -### [3.2 Hijacking](#id40) +## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. -### [3.3 CORS Requests](#id41) +## 3.3 CORS Requests To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. diff --git a/docs/sources/reference/api/docker_io_accounts_api.md b/docs/sources/reference/api/docker_io_accounts_api.md index e5e77dc421..8186e306f8 100644 --- a/docs/sources/reference/api/docker_io_accounts_api.md +++ b/docs/sources/reference/api/docker_io_accounts_api.md @@ -8,8 +8,9 @@ page_keywords: API, Docker, accounts, REST, documentation ### 1.1 Get a single user - `GET /api/v1.1/users/:username/` -: Get profile info for the specified user. +`GET /api/v1.1/users/:username/` + +Get profile info for the specified user. 
Parameters: @@ -61,8 +62,9 @@ page_keywords: API, Docker, accounts, REST, documentation ### 1.2 Update a single user - `PATCH /api/v1.1/users/:username/` -: Update profile info for the specified user. +`PATCH /api/v1.1/users/:username/` + +Update profile info for the specified user. Parameters: @@ -73,11 +75,11 @@ page_keywords: API, Docker, accounts, REST, documentation   - - **full\_name** (*string*) – (optional) the new name of the user. + - **full_name** (*string*) – (optional) the new name of the user. - **location** (*string*) – (optional) the new location. - **company** (*string*) – (optional) the new company of the user. - - **profile\_url** (*string*) – (optional) the new profile url. - - **gravatar\_email** (*string*) – (optional) the new Gravatar + - **profile_url** (*string*) – (optional) the new profile url. + - **gravatar_email** (*string*) – (optional) the new Gravatar email address. Request Headers: @@ -134,8 +136,9 @@ page_keywords: API, Docker, accounts, REST, documentation ### 1.3 List email addresses for a user - `GET /api/v1.1/users/:username/emails/` -: List email info for the specified user. +`GET /api/v1.1/users/:username/emails/` + +List email info for the specified user. Parameters: @@ -180,10 +183,11 @@ page_keywords: API, Docker, accounts, REST, documentation ### 1.4 Add email address for a user - `POST /api/v1.1/users/:username/emails/` -: Add a new email address to the specified user’s account. The email - address must be verified separately, a confirmation email is not - automatically sent. +`POST /api/v1.1/users/:username/emails/` + +Add a new email address to the specified user's account. The email +address must be verified separately, a confirmation email is not +automatically sent. Json Parameters: @@ -235,12 +239,13 @@ page_keywords: API, Docker, accounts, REST, documentation ### 1.5 Update an email address for a user - `PATCH /api/v1.1/users/:username/emails/` -: Update an email address for the specified user to either verify an - email address or set it as the primary email for the user. You - cannot use this endpoint to un-verify an email address. You cannot - use this endpoint to unset the primary email, only set another as - the primary. +`PATCH /api/v1.1/users/:username/emails/` + +Update an email address for the specified user to either verify an +email address or set it as the primary email for the user. You +cannot use this endpoint to un-verify an email address. You cannot +use this endpoint to unset the primary email, only set another as +the primary. Parameters: @@ -269,7 +274,7 @@ page_keywords: API, Docker, accounts, REST, documentation Status Codes: - - **200** – success, user’s email updated. + - **200** – success, user's email updated. - **400** – data validation error. - **401** – authentication error. - **403** – permission error, authenticated user must be the user @@ -305,9 +310,10 @@ page_keywords: API, Docker, accounts, REST, documentation ### 1.6 Delete email address for a user - `DELETE /api/v1.1/users/:username/emails/` -: Delete an email address from the specified user’s account. You - cannot delete a user’s primary email address. +`DELETE /api/v1.1/users/:username/emails/` + +Delete an email address from the specified user's account. You +cannot delete a user's primary email address. 
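A hedged sketch of calling this endpoint from a client; the host, username, and token are placeholders, and the `email` field name in the request body is an assumption modeled on the other email endpoints rather than something confirmed here:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
    )

    func main() {
        // Placeholder values; the "email" field name is an assumption.
        username := "janedoe"
        accessToken := "2YotnFZFEjr1zCsicMWpAA"

        payload, _ := json.Marshal(map[string]string{
            "email": "jane.doe+other@example.com",
        })

        req, err := http.NewRequest("DELETE",
            "https://www.docker.io/api/v1.1/users/"+username+"/emails/",
            bytes.NewReader(payload))
        if err != nil {
            panic(err)
        }
        req.Header.Set("Authorization", "Bearer "+accessToken)
        req.Header.Set("Content-Type", "application/json")

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        // A 204 NO CONTENT response indicates the address was removed.
        fmt.Println(resp.Status)
    }
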
Json Parameters: @@ -351,5 +357,3 @@ page_keywords: API, Docker, accounts, REST, documentation HTTP/1.1 204 NO CONTENT Content-Length: 0 - - diff --git a/docs/sources/reference/api/docker_io_oauth_api.md b/docs/sources/reference/api/docker_io_oauth_api.md index 3009f08e20..6cc4a6d546 100644 --- a/docs/sources/reference/api/docker_io_oauth_api.md +++ b/docs/sources/reference/api/docker_io_oauth_api.md @@ -27,46 +27,47 @@ request registration of your application send an email to [support-accounts@docker.com](mailto:support-accounts%40docker.com) with the following information: -- The name of your application -- A description of your application and the service it will provide to - docker.io users. -- A callback URI that we will use for redirecting authorization - requests to your application. These are used in the step of getting - an Authorization Code. The domain name of the callback URI will be - visible to the user when they are requested to authorize your - application. + - The name of your application + - A description of your application and the service it will provide to + docker.io users. + - A callback URI that we will use for redirecting authorization + requests to your application. These are used in the step of getting + an Authorization Code. The domain name of the callback URI will be + visible to the user when they are requested to authorize your + application. When your application is approved you will receive a response from the docker.io team with your `client_id` and `client_secret` which your application will use in the steps of getting an Authorization Code and getting an Access Token. -## 3. Endpoints +# 3. Endpoints -### 3.1 Get an Authorization Code +## 3.1 Get an Authorization Code Once You have registered you are ready to start integrating docker.io accounts into your application! The process is usually started by a user following a link in your application to an OAuth Authorization endpoint. - `GET /api/v1.1/o/authorize/` -: Request that a docker.io user authorize your application. If the - user is not already logged in, they will be prompted to login. The - user is then presented with a form to authorize your application for - the requested access scope. On submission, the user will be - redirected to the specified `redirect_uri` with - an Authorization Code. +`GET /api/v1.1/o/authorize/` + +Request that a docker.io user authorize your application. If the +user is not already logged in, they will be prompted to login. The +user is then presented with a form to authorize your application for +the requested access scope. On submission, the user will be +redirected to the specified `redirect_uri` with +an Authorization Code. Query Parameters:   - - **client\_id** – The `client_id` given to + - **client_id** – The `client_id` given to your application at registration. - - **response\_type** – MUST be set to `code`. + - **response_type** – MUST be set to `code`. This specifies that you would like an Authorization Code returned. - - **redirect\_uri** – The URI to redirect back to after the user + - **redirect_uri** – The URI to redirect back to after the user has authorized your application. If omitted, the first of your registered `response_uris` is used. If included, it must be one of the URIs which were submitted when @@ -95,7 +96,7 @@ following a link in your application to an OAuth Authorization endpoint. prompt which asks the user to authorize your application with a description of the requested scopes. 
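As a rough illustration of this first step, an application might build the authorization URL it sends the user to as follows; the client_id, redirect_uri, and host are placeholders:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        // Placeholder registration details for an application.
        clientID := "TestClientID"
        redirectURI := "https://my.app/auth/callback"

        // Build the query string for GET /api/v1.1/o/authorize/.
        q := url.Values{}
        q.Set("client_id", clientID)
        q.Set("response_type", "code") // ask for an Authorization Code
        q.Set("redirect_uri", redirectURI)

        authorizeURL := "https://www.docker.io/api/v1.1/o/authorize/?" + q.Encode()

        // Redirect the user's browser to authorizeURL; after they approve,
        // docker.io redirects back to redirectURI with a ?code=... parameter.
        fmt.Println(authorizeURL)
    }

After the user approves the request, the browser returns to the redirect_uri with a `code` query parameter, which is then exchanged for an Access Token as described in the next section.
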
- ![](../../../_images/io_oauth_authorization_page.png) + ![](../../../static_files/io_oauth_authorization_page.png) Once the user allows or denies your Authorization Request the user will be redirected back to your application. Included in that @@ -113,34 +114,35 @@ following a link in your application to an OAuth Authorization endpoint. : An error message in the event of the user denying the authorization or some other kind of error with the request. -### 3.2 Get an Access Token +## 3.2 Get an Access Token Once the user has authorized your application, a request will be made to -your application’s specified `redirect_uri` which +your application'sspecified `redirect_uri` which includes a `code` parameter that you must then use to get an Access Token. - `POST /api/v1.1/o/token/` -: Submit your newly granted Authorization Code and your application’s - credentials to receive an Access Token and Refresh Token. The code - is valid for 60 seconds and cannot be used more than once. +`POST /api/v1.1/o/token/` + +Submit your newly granted Authorization Code and your application's +credentials to receive an Access Token and Refresh Token. The code +is valid for 60 seconds and cannot be used more than once. Request Headers:   - **Authorization** – HTTP basic authentication using your - application’s `client_id` and + application's `client_id` and `client_secret` Form Parameters:   - - **grant\_type** – MUST be set to `authorization_code` - - **code** – The authorization code received from the user’s + - **grant_type** – MUST be set to `authorization_code` + - **code** – The authorization code received from the user's redirect request. - - **redirect\_uri** – The same `redirect_uri` + - **redirect_uri** – The same `redirect_uri` used in the authentication request. **Example Request** @@ -177,31 +179,32 @@ to get an Access Token. In the case of an error, there will be a non-200 HTTP Status and and data detailing the error. -### 3.3 Refresh a Token +## 3.3 Refresh a Token Once the Access Token expires you can use your `refresh_token` to have docker.io issue your application a new Access Token, if the user has not revoked access from your application. - `POST /api/v1.1/o/token/` -: Submit your `refresh_token` and application’s - credentials to receive a new Access Token and Refresh Token. The - `refresh_token` can be used only once. +`POST /api/v1.1/o/token/` + +Submit your `refresh_token` and application's +credentials to receive a new Access Token and Refresh Token. The +`refresh_token` can be used only once. Request Headers:   - **Authorization** – HTTP basic authentication using your - application’s `client_id` and + application's `client_id` and `client_secret` Form Parameters:   - - **grant\_type** – MUST be set to `refresh_token` - - **refresh\_token** – The `refresh_token` + - **grant_type** – MUST be set to `refresh_token` + - **refresh_token** – The `refresh_token` which was issued to your application. - **scope** – (optional) The scope of the access token to be returned. Must not include any scope not originally granted by @@ -241,11 +244,10 @@ if the user has not revoked access from your application. In the case of an error, there will be a non-200 HTTP Status and and data detailing the error. -## 4. Use an Access Token with the API +# 4. Use an Access Token with the API Many of the docker.io API requests will require a Authorization request -header field. Simply ensure you add this header with "Bearer -\<`access_token`\>": +header field. 
Simply ensure you add this header with "Bearer <`access_token`>": GET /api/v1.1/resource HTTP/1.1 Host: docker.io diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 5df7d8938c..3c58b1b990 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -6,31 +6,30 @@ page_keywords: API, Docker, rcli, REST, documentation ## 1. Brief introduction -- The Remote API is replacing rcli -- By default the Docker daemon listens on unix:///var/run/docker.sock - and the client must have root access to interact with the daemon -- If a group named *docker* exists on your system, docker will apply - ownership of the socket to the group -- The API tends to be REST, but for some complex commands, like attach - or pull, the HTTP connection is hijacked to transport stdout stdin - and stderr -- Since API version 1.2, the auth configuration is now handled client - side, so the client has to send the authConfig as POST in - /images/(name)/push -- authConfig, set as the `X-Registry-Auth` header, - is currently a Base64 encoded (json) string with credentials: - `{'username': string, 'password': string, 'email': string, 'serveraddress' : string}` + - The Remote API is replacing rcli + - By default the Docker daemon listens on unix:///var/run/docker.sock + and the client must have root access to interact with the daemon + - If a group named *docker* exists on your system, docker will apply + ownership of the socket to the group + - The API tends to be REST, but for some complex commands, like attach + or pull, the HTTP connection is hijacked to transport stdout stdin + and stderr + - Since API version 1.2, the auth configuration is now handled client + side, so the client has to send the authConfig as POST in /images/(name)/push + - authConfig, set as the `X-Registry-Auth` header, is currently a Base64 + encoded (json) string with credentials: + `{'username': string, 'password': string, 'email': string, 'serveraddress' : string}` ## 2. Versions The current version of the API is 1.11 -Calling /images/\/insert is the same as calling -/v1.11/images/\/insert +Calling /images//insert is the same as calling +/v1.11/images//insert You can still call an old version of the api using -/v1.11/images/\/insert +/v1.11/images//insert ### v1.11 @@ -38,11 +37,13 @@ You can still call an old version of the api using [*Docker Remote API v1.11*](../docker_remote_api_v1.11/) -#### What’s new +#### What's new - `GET /events` -: **New!** You can now use the `-until` parameter - to close connection after timestamp. +`GET /events` + +**New!** +You can now use the `-until` parameter to close connection +after timestamp. ### v1.10 @@ -50,16 +51,21 @@ You can still call an old version of the api using [*Docker Remote API v1.10*](../docker_remote_api_v1.10/) -#### What’s new +#### What's new - `DELETE /images/`(*name*) -: **New!** You can now use the force parameter to force delete of an - image, even if it’s tagged in multiple repositories. **New!** You +`DELETE /images/(name)` + +**New!** +You can now use the force parameter to force delete of an + image, even if it's tagged in multiple repositories. 
**New!** + You can now use the noprune parameter to prevent the deletion of parent images - `DELETE /containers/`(*id*) -: **New!** You can now use the force paramter to force delete a +`DELETE /containers/(id)` + +**New!** +You can now use the force paramter to force delete a container, even if it is currently running ### v1.9 @@ -68,51 +74,58 @@ You can still call an old version of the api using [*Docker Remote API v1.9*](../docker_remote_api_v1.9/) -#### What’s new +#### What's new - `POST /build` -: **New!** This endpoint now takes a serialized ConfigFile which it - uses to resolve the proper registry auth credentials for pulling the - base image. Clients which previously implemented the version - accepting an AuthConfig object must be updated. +`POST /build` + +**New!** +This endpoint now takes a serialized ConfigFile which it +uses to resolve the proper registry auth credentials for pulling the +base image. Clients which previously implemented the version +accepting an AuthConfig object must be updated. ### v1.8 #### Full Documentation -#### What’s new +#### What's new - `POST /build` -: **New!** This endpoint now returns build status as json stream. In - case of a build error, it returns the exit status of the failed - command. +`POST /build` - `GET /containers/`(*id*)`/json` -: **New!** This endpoint now returns the host config for the - container. +**New!** +This endpoint now returns build status as json stream. In +case of a build error, it returns the exit status of the failed +command. - `POST /images/create` -: +`GET /containers/(id)/json` - `POST /images/`(*name*)`/insert` -: +**New!** +This endpoint now returns the host config for the +container. - `POST /images/`(*name*)`/push` -: **New!** progressDetail object was added in the JSON. It’s now - possible to get the current value and the total of the progress - without having to parse the string. +`POST /images/create` + +`POST /images/(name)/insert` + +`POST /images/(name)/push` + +**New!** +progressDetail object was added in the JSON. It's now +possible to get the current value and the total of the progress +without having to parse the string. ### v1.7 #### Full Documentation -#### What’s new +#### What's new - `GET /images/json` -: The format of the json returned from this uri changed. Instead of an - entry for each repo/tag on an image, each image is only represented - once, with a nested attribute indicating the repo/tags that apply to - that image. +`GET /images/json` + +The format of the json returned from this uri changed. Instead of an +entry for each repo/tag on an image, each image is only represented +once, with a nested attribute indicating the repo/tags that apply to +that image. Instead of: @@ -192,60 +205,74 @@ You can still call an old version of the api using } ] - `GET /images/viz` -: This URI no longer exists. The `images --viz` - output is now generated in the client, using the - `/images/json` data. +`GET /images/viz` + +This URI no longer exists. The `images --viz` +output is now generated in the client, using the +`/images/json` data. ### v1.6 #### Full Documentation -#### What’s new +#### What's new - `POST /containers/`(*id*)`/attach` -: **New!** You can now split stderr from stdout. This is done by - prefixing a header to each transmition. See - [`POST /containers/(id)/attach` -](../docker_remote_api_v1.9/#post--containers-(id)-attach "POST /containers/(id)/attach"). - The WebSocket attach is unchanged. Note that attach calls on the - previous API version didn’t change. Stdout and stderr are merged. 
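The read-8-bytes, pick-a-stream, copy-the-frame loop described for the attach endpoint can be sketched roughly as below. It assumes the documented frame header (first byte selects the stream, last four bytes hold the payload size, taken here as big-endian) and an already-hijacked connection:

    package main

    import (
        "encoding/binary"
        "io"
        "os"
    )

    // demuxAttachStream splits a multiplexed attach stream (TTY disabled)
    // into stdout and stderr, following the loop in the attach docs: read
    // the 8-byte header, choose the output from the first byte, read the
    // frame size from the last 4 bytes, copy that many bytes, and repeat.
    func demuxAttachStream(r io.Reader, stdout, stderr io.Writer) error {
        header := make([]byte, 8)
        for {
            if _, err := io.ReadFull(r, header); err != nil {
                if err == io.EOF {
                    return nil
                }
                return err
            }
            var out io.Writer
            switch header[0] {
            case 2:
                out = stderr
            default: // 0 (stdin) and 1 (stdout) both written to stdout here
                out = stdout
            }
            size := binary.BigEndian.Uint32(header[4:8])
            if _, err := io.CopyN(out, r, int64(size)); err != nil {
                return err
            }
        }
    }

    func main() {
        // In practice r would be the hijacked connection returned by
        // POST /containers/(id)/attach; stdin is used only for illustration.
        _ = demuxAttachStream(os.Stdin, os.Stdout, os.Stderr)
    }
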
+`POST /containers/(id)/attach` + +**New!** +You can now split stderr from stdout. This is done by +prefixing a header to each transmition. See +[`POST /containers/(id)/attach`]( +../docker_remote_api_v1.9/#post--containers-(id)-attach "POST /containers/(id)/attach"). +The WebSocket attach is unchanged. Note that attach calls on the +previous API version didn't change. Stdout and stderr are merged. ### v1.5 #### Full Documentation -#### What’s new +#### What's new - `POST /images/create` -: **New!** You can now pass registry credentials (via an AuthConfig +`POST /images/create` + +**New!** +You can now pass registry credentials (via an AuthConfig object) through the X-Registry-Auth header - `POST /images/`(*name*)`/push` -: **New!** The AuthConfig object now needs to be passed through the +`POST /images/(name)/push` + +**New!** +The AuthConfig object now needs to be passed through the X-Registry-Auth header - `GET /containers/json` -: **New!** The format of the Ports entry has been changed to a list of - dicts each containing PublicPort, PrivatePort and Type describing a - port mapping. +`GET /containers/json` + +**New!** +The format of the Ports entry has been changed to a list of +dicts each containing PublicPort, PrivatePort and Type describing a +port mapping. ### v1.4 #### Full Documentation -#### What’s new +#### What's new - `POST /images/create` -: **New!** When pulling a repo, all images are now downloaded in - parallel. +`POST /images/create` - `GET /containers/`(*id*)`/top` -: **New!** You can now use ps args with docker top, like docker top - \ aux +**New!** +When pulling a repo, all images are now downloaded in parallel. - `GET /events:` -: **New!** Image’s name added in the events +`GET /containers/(id)/top` + +**New!** +You can now use ps args with docker top, like docker top + aux + +`GET /events` + +**New!** +Image's name added in the events ### v1.3 @@ -254,20 +281,23 @@ docker v0.5.0 #### Full Documentation -#### What’s new +#### What's new - `GET /containers/`(*id*)`/top` -: List the processes running inside a container. +`GET /containers/(id)/top` - `GET /events:` -: **New!** Monitor docker’s events via streaming or via polling +List the processes running inside a container. + +`GET /events` + +**New!** +Monitor docker's events via streaming or via polling Builder (/build): -- Simplify the upload of the build context -- Simply stream a tarball instead of multipart upload with 4 - intermediary buffers -- Simpler, less memory usage, less disk usage and faster + - Simplify the upload of the build context + - Simply stream a tarball instead of multipart upload with 4 + intermediary buffers + - Simpler, less memory usage, less disk usage and faster > **Warning**: > The /build improvements are not reverse-compatible. Pre 1.3 clients will @@ -275,12 +305,12 @@ Builder (/build): List containers (/containers/json): -- You can use size=1 to get the size of the containers + - You can use size=1 to get the size of the containers -Start containers (/containers/\/start): +Start containers (/containers//start): -- You can now pass host-specific configuration (e.g. bind mounts) in - the POST body for start calls + - You can now pass host-specific configuration (e.g. bind mounts) in + the POST body for start calls ### v1.2 @@ -289,25 +319,28 @@ docker v0.4.2 #### Full Documentation -#### What’s new +#### What's new The auth configuration is now handled by the client. 
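A minimal sketch of how a client might build that header, assuming the Base64-encoded JSON credential shape (username, password, email, serveraddress) described in the introduction above; the credentials, the daemon address, and the choice of standard Base64 are placeholders rather than confirmed details:

    package main

    import (
        "encoding/base64"
        "encoding/json"
        "fmt"
        "net/http"
    )

    func main() {
        // Placeholder credentials following the authConfig shape:
        // username, password, email, serveraddress.
        authConfig := map[string]string{
            "username":      "janedoe",
            "password":      "secret",
            "email":         "jane.doe@example.com",
            "serveraddress": "https://index.docker.io/v1/",
        }

        raw, err := json.Marshal(authConfig)
        if err != nil {
            panic(err)
        }
        // Standard Base64 is used here; check which variant the daemon expects.
        encoded := base64.StdEncoding.EncodeToString(raw)

        // Attach the encoded credentials to a push request. The address assumes
        // a daemon bound to a TCP port; by default it listens on a Unix socket.
        req, err := http.NewRequest("POST",
            "http://localhost:4243/images/janedoe/myimage/push", nil)
        if err != nil {
            panic(err)
        }
        req.Header.Set("X-Registry-Auth", encoded)

        fmt.Println(req.Header.Get("X-Registry-Auth"))
    }
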
-The client should send it’s authConfig as POST on each call of -/images/(name)/push +The client should send it's authConfig as POST on each call of +`/images/(name)/push` - `GET /auth` -: **Deprecated.** +`GET /auth` - `POST /auth` -: Only checks the configuration but doesn’t store it on the server +**Deprecated.** + +`POST /auth` + +Only checks the configuration but doesn't store it on the server Deleting an image is now improved, will only untag the image if it has children and remove all the untagged parents if has any. - `POST /images//delete` -: Now returns a JSON structure with the list of images - deleted/untagged. +`POST /images//delete` + +Now returns a JSON structure with the list of images +deleted/untagged. ### v1.1 @@ -316,24 +349,23 @@ docker v0.4.0 #### Full Documentation -#### What’s new +#### What's new - `POST /images/create` -: +`POST /images/create` - `POST /images/`(*name*)`/insert` -: +`POST /images/(name)/insert` - `POST /images/`(*name*)`/push` -: Uses json stream instead of HTML hijack, it looks like this: +`POST /images/(name)/push` - > HTTP/1.1 200 OK - > Content-Type: application/json - > - > {"status":"Pushing..."} - > {"status":"Pushing", "progress":"1/? (n/a)"} - > {"error":"Invalid..."} - > ... +Uses json stream instead of HTML hijack, it looks like this: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"Pushing..."} + {"status":"Pushing", "progress":"1/? (n/a)"} + {"error":"Invalid..."} + ... ### v1.0 @@ -342,6 +374,6 @@ docker v0.3.4 #### Full Documentation -#### What’s new +#### What's new Initial version diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md index 02d13403ef..474857bac3 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.md +++ b/docs/sources/reference/api/docker_remote_api_v1.10.md @@ -6,23 +6,23 @@ page_keywords: API, Docker, rcli, REST, documentation ## 1. Brief introduction -- The Remote API has replaced rcli -- The daemon listens on `unix:///var/run/docker.sock` -, but you can [*Bind Docker to another host/port or a Unix - socket*](../../../use/basics/#bind-docker). -- The API tends to be REST, but for some complex commands, like - `attach` or `pull`, the HTTP - connection is hijacked to transport `stdout, stdin` - and `stderr` + - The Remote API has replaced rcli + - The daemon listens on `unix:///var/run/docker.sock` but you can + [*Bind Docker to another host/port or a Unix socket*]( + ../../../use/basics/#bind-docker). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` + and `stderr` -## 2. Endpoints +# 2. 
Endpoints -### 2.1 Containers +## 2.1 Containers -#### List containers +### List containers - `GET /containers/json` -: List containers +`GET /containers/json` + +List containers **Example request**: @@ -97,10 +97,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### Create a container +### Create a container - `POST /containers/create` -: Create a container +`POST /containers/create` + +Create a container **Example request**: @@ -149,7 +150,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **config** – the container’s configuration + - **config** – the container's configuration Query Parameters: @@ -165,11 +166,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **406** – impossible to attach (container not running) - **500** – server error -#### Inspect a container +### Inspect a container - `GET /containers/`(*id*)`/json` -: Return low-level information on the container `id` +`GET /containers/(id)/json` +Return low-level information on the container `id` **Example request**: @@ -248,10 +249,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### List processes running inside a container +### List processes running inside a container - `GET /containers/`(*id*)`/top` -: List processes running inside the container `id` +`GET /containers/(id)/top` + +List processes running inside the container `id` **Example request**: @@ -294,10 +296,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Inspect changes on a container’s filesystem +### Inspect changes on a container's filesystem - `GET /containers/`(*id*)`/changes` -: Inspect changes on container `id` ‘s filesystem +`GET /containers/(id)/changes` + +Inspect changes on container `id` 's filesystem **Example request**: @@ -329,10 +332,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Export a container +### Export a container - `GET /containers/`(*id*)`/export` -: Export the contents of container `id` +`GET /containers/(id)/export` + +Export the contents of container `id` **Example request**: @@ -351,10 +355,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Start a container +### Start a container - `POST /containers/`(*id*)`/start` -: Start the container `id` +`POST /containers/(id)/start` + +Start the container `id` **Example request**: @@ -380,7 +385,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **hostConfig** – the container’s host configuration (optional) + - **hostConfig** – the container's host configuration (optional) Status Codes: @@ -388,10 +393,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Stop a container +### Stop a container - `POST /containers/`(*id*)`/stop` -: Stop the container `id` +`POST /containers/(id)/stop` + +Stop the container `id` **Example request**: @@ -413,10 +419,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Restart a container +### Restart a container - `POST /containers/`(*id*)`/restart` -: Restart the container `id` +`POST /containers/(id)/restart` + +Restart the container `id` **Example request**: @@ -438,10 +445,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no 
such container - **500** – server error -#### Kill a container +### Kill a container - `POST /containers/`(*id*)`/kill` -: Kill the container `id` +`POST /containers/(id)/kill` + +Kill the container `id` **Example request**: @@ -457,10 +465,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Attach to a container +### Attach to a container - `POST /containers/`(*id*)`/attach` -: Attach to the container `id` +`POST /containers/(id)/attach` + +Attach to the container `id` **Example request**: @@ -500,7 +509,7 @@ page_keywords: API, Docker, rcli, REST, documentation When using the TTY setting is enabled in [`POST /containers/create` ](../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), - the stream is the raw data from the process PTY and client’s stdin. + the stream is the raw data from the process PTY and client's stdin. When the TTY is disabled, then the stream is multiplexed to separate stdout and stderr. @@ -539,10 +548,11 @@ page_keywords: API, Docker, rcli, REST, documentation 4. Read the extracted size and output it on the correct output 5. Goto 1) -#### Wait a container +### Wait a container - `POST /containers/`(*id*)`/wait` -: Block until container `id` stops, then returns +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code **Example request**: @@ -562,9 +572,9 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Remove a container +### Remove a container - `DELETE /containers/`(*id*) + `DELETE /containers/(id*) : Remove the container `id` from the filesystem **Example request**: @@ -591,10 +601,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Copy files or folders from a container +### Copy files or folders from a container - `POST /containers/`(*id*)`/copy` -: Copy files or folders of container `id` +`POST /containers/(id)/copy` + +Copy files or folders of container `id` **Example request**: @@ -620,10 +631,11 @@ page_keywords: API, Docker, rcli, REST, documentation ### 2.2 Images -#### List Images +### List Images - `GET /images/json` -: **Example request**: +`GET /images/json` + +**Example request**: GET /images/json?all=0 HTTP/1.1 @@ -657,10 +669,11 @@ page_keywords: API, Docker, rcli, REST, documentation } ] -#### Create an image +### Create an image - `POST /images/create` -: Create an image, either by pull it from the registry or by importing +`POST /images/create` + +Create an image, either by pull it from the registry or by importing it **Example request**: @@ -702,10 +715,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Insert a file in an image +### Insert a file in an image - `POST /images/`(*name*)`/insert` -: Insert a file from `url` in the image +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` **Example request**: @@ -727,10 +741,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Inspect an image +### Inspect an image - `GET /images/`(*name*)`/json` -: Return low-level information on the image `name` +`GET /images/(name)/json` + +Return low-level information on the image `name` **Example request**: @@ -774,10 +789,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### Get the 
history of an image +### Get the history of an image - `GET /images/`(*name*)`/history` -: Return the history of the image `name` +`GET /images/(name)/history` + +Return the history of the image `name` **Example request**: @@ -807,10 +823,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### Push an image on the registry +### Push an image on the registry - `POST /images/`(*name*)`/push` -: Push the image `name` on the registry +`POST /images/(name)/push` + +Push the image `name` on the registry **Example request**: @@ -845,10 +862,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### Tag an image into a repository +### Tag an image into a repository - `POST /images/`(*name*)`/tag` -: Tag the image `name` into a repository +`POST /images/(name)/tag` + +Tag the image `name` into a repository **Example request**: @@ -873,9 +891,9 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### Remove an image +### Remove an image - `DELETE /images/`(*name*) + `DELETE /images/(name*) : Remove the image `name` from the filesystem **Example request**: @@ -907,14 +925,15 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### Search images +### Search images - `GET /images/search` -: Search for an image in the docker index. +`GET /images/search` + +Search for an image in the docker index. > **Note**: > The response keys have changed from API v1.6 to reflect the JSON -> sent by the registry server to the docker daemon’s request. +> sent by the registry server to the docker daemon's request. **Example request**: @@ -963,10 +982,11 @@ page_keywords: API, Docker, rcli, REST, documentation ### 2.3 Misc -#### Build an image from Dockerfile via stdin +### Build an image from Dockerfile via stdin - `POST /build` -: Build an image from Dockerfile via stdin +`POST /build` + +Build an image from Dockerfile via stdin **Example request**: @@ -1013,10 +1033,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Check auth configuration +### Check auth configuration - `POST /auth` -: Get the default username and email +`POST /auth` + +Get the default username and email **Example request**: @@ -1040,10 +1061,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **204** – no error - **500** – server error -#### Display system-wide information +### Display system-wide information - `GET /info` -: Display system-wide information +`GET /info` + +Display system-wide information **Example request**: @@ -1070,10 +1092,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Show the docker version information +### Show the docker version information - `GET /version` -: Show the docker version information +`GET /version` + +Show the docker version information **Example request**: @@ -1095,10 +1118,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Create a new image from a container’s changes +### Create a new image from a container's changes - `POST /commit` -: Create a new image from a container’s changes +`POST /commit` + +Create a new image from a container's changes **Example request**: @@ -1120,7 +1144,7 @@ page_keywords: API, Docker, rcli, REST, documentation - **tag** – tag - **m** – commit message - **author** – 
author (eg. "John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>") + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") - **run** – config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) @@ -1130,10 +1154,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Monitor Docker’s events +### Monitor Docker's events - `GET /events` -: Get events from docker, either in real time via streaming, or via +`GET /events` + +Get events from docker, either in real time via streaming, or via polling (using since) **Example request**: @@ -1161,10 +1186,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Get a tarball containing all images and tags in a repository +### Get a tarball containing all images and tags in a repository - `GET /images/`(*name*)`/get` -: Get a tarball containing all images and metadata for the repository +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified by `name`. **Example request** @@ -1183,10 +1209,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Load a tarball with a set of images and tags into docker +### Load a tarball with a set of images and tags into docker - `POST /images/load` -: Load a set of images and tags into the docker repository. +`POST /images/load` + +Load a set of images and tags into the docker repository. **Example request** @@ -1203,33 +1230,33 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -## 3. Going further +# 3. Going further -### 3.1 Inside ‘docker run’ +## 3.1 Inside `docker run` -Here are the steps of ‘docker run’ : +Here are the steps of `docker run` : -- Create the container + - Create the container -- If the status code is 404, it means the image doesn’t exists: - : - Try to pull it - - Then retry to create the container + - If the status code is 404, it means the image doesn't exists: + - Try to pull it + - Then retry to create the container -- Start the container + - Start the container -- If you are not in detached mode: - : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 + - If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 -- If in detached mode or only stdin is attached: - : - Display the container’s id + - If in detached mode or only stdin is attached: + - Display the container's id -### 3.2 Hijacking +## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. -### 3.3 CORS Requests +## 3.3 CORS Requests To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index 6e038acd82..af47fdefbf 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -6,23 +6,23 @@ page_keywords: API, Docker, rcli, REST, documentation ## 1. 
Brief introduction -- The Remote API has replaced rcli -- The daemon listens on `unix:///var/run/docker.sock` -, but you can [*Bind Docker to another host/port or a Unix - socket*](../../../use/basics/#bind-docker). -- The API tends to be REST, but for some complex commands, like - `attach` or `pull`, the HTTP - connection is hijacked to transport `stdout, stdin` - and `stderr` + - The Remote API has replaced rcli + - The daemon listens on `unix:///var/run/docker.sock` but you can + [*Bind Docker to another host/port or a Unix socket*]( + ../../../use/basics/#bind-docker). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` + and `stderr` -## 2. Endpoints +# 2. Endpoints -### 2.1 Containers +## 2.1 Containers -#### List containers +### List containers - `GET /containers/json` -: List containers +`GET /containers/json` + +List containers **Example request**: @@ -97,10 +97,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### Create a container +### Create a container - `POST /containers/create` -: Create a container +`POST /containers/create` + +Create a container **Example request**: @@ -150,7 +151,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **config** – the container’s configuration + - **config** – the container's configuration Query Parameters: @@ -166,10 +167,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **406** – impossible to attach (container not running) - **500** – server error -#### Inspect a container +### Inspect a container - `GET /containers/`(*id*)`/json` -: Return low-level information on the container `id` +`GET /containers/(id)/json` + +Return low-level information on the container `id` **Example request**: @@ -251,10 +253,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### List processes running inside a container +### List processes running inside a container - `GET /containers/`(*id*)`/top` -: List processes running inside the container `id` +`GET /containers/(id)/top` + +List processes running inside the container `id` **Example request**: @@ -289,7 +292,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **ps\_args** – ps arguments to use (eg. aux) + - **ps_args** – ps arguments to use (eg. 
aux) Status Codes: @@ -297,10 +300,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Inspect changes on a container’s filesystem +### Inspect changes on a container's filesystem - `GET /containers/`(*id*)`/changes` -: Inspect changes on container `id` ‘s filesystem +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem **Example request**: @@ -332,10 +336,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Export a container +### Export a container - `GET /containers/`(*id*)`/export` -: Export the contents of container `id` +`GET /containers/(id)/export` + +Export the contents of container `id` **Example request**: @@ -354,10 +359,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Start a container +### Start a container - `POST /containers/`(*id*)`/start` -: Start the container `id` +`POST /containers/(id)/start` + +Start the container `id` **Example request**: @@ -381,7 +387,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **hostConfig** – the container’s host configuration (optional) + - **hostConfig** – the container's host configuration (optional) Status Codes: @@ -389,10 +395,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Stop a container +### Stop a container - `POST /containers/`(*id*)`/stop` -: Stop the container `id` +`POST /containers/(id)/stop` + +Stop the container `id` **Example request**: @@ -414,10 +421,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Restart a container +### Restart a container - `POST /containers/`(*id*)`/restart` -: Restart the container `id` +`POST /containers/(id)/restart` + +Restart the container `id` **Example request**: @@ -439,10 +447,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Kill a container +### Kill a container - `POST /containers/`(*id*)`/kill` -: Kill the container `id` +`POST /containers/(id)/kill` + +Kill the container `id` **Example request**: @@ -458,10 +467,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Attach to a container +### Attach to a container - `POST /containers/`(*id*)`/attach` -: Attach to the container `id` +`POST /containers/(id)/attach` + +Attach to the container `id` **Example request**: @@ -500,8 +510,8 @@ page_keywords: API, Docker, rcli, REST, documentation When using the TTY setting is enabled in [`POST /containers/create` -](../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), - the stream is the raw data from the process PTY and client’s stdin. + ](../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), + the stream is the raw data from the process PTY and client's stdin. When the TTY is disabled, then the stream is multiplexed to separate stdout and stderr. @@ -540,11 +550,11 @@ page_keywords: API, Docker, rcli, REST, documentation 4. Read the extracted size and output it on the correct output 5. 
Goto 1) -#### Wait a container +### Wait a container - `POST /containers/`(*id*)`/wait` -: Block until container `id` stops, then returns - the exit code +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code **Example request**: @@ -563,10 +573,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Remove a container +### Remove a container - `DELETE /containers/`(*id*) -: Remove the container `id` from the filesystem +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem **Example request**: @@ -592,10 +603,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Copy files or folders from a container +### Copy files or folders from a container - `POST /containers/`(*id*)`/copy` -: Copy files or folders of container `id` +`POST /containers/(id)/copy` + +Copy files or folders of container `id` **Example request**: @@ -619,12 +631,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -### 2.2 Images +## 2.2 Images -#### List Images +### List Images - `GET /images/json` -: **Example request**: +`GET /images/json` + +**Example request**: GET /images/json?all=0 HTTP/1.1 @@ -658,11 +671,11 @@ page_keywords: API, Docker, rcli, REST, documentation } ] -#### Create an image +### Create an image - `POST /images/create` -: Create an image, either by pull it from the registry or by importing - it +`POST /images/create` + +Create an image, either by pull it from the registry or by importing it **Example request**: @@ -703,11 +716,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Insert a file in an image +### Insert a file in an image - `POST /images/`(*name*)`/insert` -: Insert a file from `url` in the image - `name` at `path` +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` **Example request**: @@ -728,10 +741,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Inspect an image +### Inspect an image - `GET /images/`(*name*)`/json` -: Return low-level information on the image `name` +`GET /images/(name)/json` + +Return low-level information on the image `name` **Example request**: @@ -777,10 +791,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### Get the history of an image +### Get the history of an image - `GET /images/`(*name*)`/history` -: Return the history of the image `name` +`GET /images/(name)/history` + +Return the history of the image `name` **Example request**: @@ -810,10 +825,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### Push an image on the registry +### Push an image on the registry - `POST /images/`(*name*)`/push` -: Push the image `name` on the registry +`POST /images/(name)/push` + +Push the image `name` on the registry **Example request**: @@ -848,10 +864,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### Tag an image into a repository +### Tag an image into a repository - `POST /images/`(*name*)`/tag` -: Tag the image `name` into a repository +`POST /images/(name)/tag` + +Tag the image `name` into a repository **Example request**: @@ -876,10 +893,11 @@ page_keywords: API, 
Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### Remove an image +### Remove an image - `DELETE /images/`(*name*) -: Remove the image `name` from the filesystem +`DELETE /images/(name)` + +Remove the image `name` from the filesystem **Example request**: @@ -910,14 +928,15 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### Search images +### Search images - `GET /images/search` -: Search for an image in the docker index. +`GET /images/search` + +Search for an image in the docker index. > **Note**: > The response keys have changed from API v1.6 to reflect the JSON -> sent by the registry server to the docker daemon’s request. +> sent by the registry server to the docker daemon's request. **Example request**: @@ -964,12 +983,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -### 2.3 Misc +## 2.3 Misc -#### Build an image from Dockerfile via stdin +### Build an image from Dockerfile via stdin - `POST /build` -: Build an image from Dockerfile via stdin +`POST /build` + +Build an image from Dockerfile via stdin **Example request**: @@ -990,7 +1010,7 @@ page_keywords: API, Docker, rcli, REST, documentation following algorithms: identity (no compression), gzip, bzip2, xz. The archive must include a file called `Dockerfile` - at its root. It may include any number of other files, + at its root. It may include any number of other files, which will be accessible in the build context (See the [*ADD build command*](../../builder/#dockerbuilder)). @@ -1016,10 +1036,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Check auth configuration +### Check auth configuration - `POST /auth` -: Get the default username and email +`POST /auth` + +Get the default username and email **Example request**: @@ -1043,10 +1064,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **204** – no error - **500** – server error -#### Display system-wide information +### Display system-wide information - `GET /info` -: Display system-wide information +`GET /info` + +Display system-wide information **Example request**: @@ -1073,10 +1095,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Show the docker version information +### Show the docker version information - `GET /version` -: Show the docker version information +`GET /version` + +Show the docker version information **Example request**: @@ -1098,10 +1121,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Create a new image from a container’s changes +### Create a new image from a container's changes - `POST /commit` -: Create a new image from a container’s changes +`POST /commit` + +Create a new image from a container's changes **Example request**: @@ -1123,7 +1147,7 @@ page_keywords: API, Docker, rcli, REST, documentation - **tag** – tag - **m** – commit message - **author** – author (eg. "John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>") + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") - **run** – config automatically applied when the image is run. 
(ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) @@ -1133,11 +1157,12 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Monitor Docker’s events +### Monitor Docker's events - `GET /events` -: Get events from docker, either in real time via streaming, or via - polling (using since) +`GET /events` + +Get events from docker, either in real time via streaming, or +via polling (using since) **Example request**: @@ -1165,11 +1190,12 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Get a tarball containing all images and tags in a repository +### Get a tarball containing all images and tags in a repository - `GET /images/`(*name*)`/get` -: Get a tarball containing all images and metadata for the repository - specified by `name`. +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository +specified by `name`. **Example request** @@ -1187,10 +1213,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Load a tarball with a set of images and tags into docker +### Load a tarball with a set of images and tags into docker - `POST /images/load` -: Load a set of images and tags into the docker repository. +`POST /images/load` + +Load a set of images and tags into the docker repository. **Example request** @@ -1207,33 +1234,33 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -## 3. Going further +# 3. Going further -### 3.1 Inside ‘docker run’ +## 3.1 Inside `docker run` -Here are the steps of ‘docker run’ : +Here are the steps of `docker run`: -- Create the container +- Create the container -- If the status code is 404, it means the image doesn’t exists: - : - Try to pull it - - Then retry to create the container +- If the status code is 404, it means the image doesn't exists: + - Try to pull it + - Then retry to create the container -- Start the container +- Start the container -- If you are not in detached mode: - : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 +- If you are not in detached mode: + - Attach to the container, using logs=1 (to have stdout and + stderr from the container's start) and stream=1 -- If in detached mode or only stdin is attached: - : - Display the container’s id +- If in detached mode or only stdin is attached: + - Display the container's id -### 3.2 Hijacking +## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. -### 3.3 CORS Requests +## 3.3 CORS Requests To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md index aaa8dc194b..be1c76aee4 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.md +++ b/docs/sources/reference/api/docker_remote_api_v1.9.md @@ -4,25 +4,25 @@ page_keywords: API, Docker, rcli, REST, documentation # Docker Remote API v1.9 -## 1. Brief introduction +# 1. Brief introduction -- The Remote API has replaced rcli -- The daemon listens on `unix:///var/run/docker.sock` -, but you can [*Bind Docker to another host/port or a Unix - socket*](../../../use/basics/#bind-docker). 
-- The API tends to be REST, but for some complex commands, like - `attach` or `pull`, the HTTP - connection is hijacked to transport `stdout, stdin` - and `stderr` + - The Remote API has replaced rcli + - The daemon listens on `unix:///var/run/docker.sock` but you can + [*Bind Docker to another host/port or a Unix socket*]( + ../../../use/basics/#bind-docker). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` + and `stderr` -## 2. Endpoints +# 2. Endpoints -### 2.1 Containers +## 2.1 Containers -#### List containers +### List containers - `GET /containers/json` -: List containers +`GET /containers/json` + +List containers. **Example request**: @@ -97,10 +97,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **400** – bad parameter - **500** – server error -#### Create a container +### Create a container - `POST /containers/create` -: Create a container +`POST /containers/create` + +Create a container **Example request**: @@ -179,11 +180,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **406** – impossible to attach (container not running) - **500** – server error -#### Inspect a container +### Inspect a container - `GET /containers/`(*id*)`/json` -: Return low-level information on the container `id` +`GET /containers/(id)/json` +Return low-level information on the container `id` **Example request**: @@ -264,10 +265,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### List processes running inside a container +### List processes running inside a container - `GET /containers/`(*id*)`/top` -: List processes running inside the container `id` +`GET /containers/(id)/top` + +List processes running inside the container `id` **Example request**: @@ -302,7 +304,7 @@ page_keywords: API, Docker, rcli, REST, documentation   - - **ps\_args** – ps arguments to use (eg. aux) + - **ps_args** – ps arguments to use (eg. 
aux) Status Codes: @@ -310,10 +312,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Inspect changes on a container’s filesystem +### Inspect changes on a container's filesystem - `GET /containers/`(*id*)`/changes` -: Inspect changes on container `id` ‘s filesystem +`GET /containers/(id)/changes` + +Inspect changes on container `id`'s filesystem **Example request**: @@ -345,10 +348,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Export a container +### Export a container - `GET /containers/`(*id*)`/export` -: Export the contents of container `id` +`GET /containers/(id)/export` + +Export the contents of container `id` **Example request**: @@ -367,10 +371,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Start a container +### Start a container - `POST /containers/`(*id*)`/start` -: Start the container `id` +`POST /containers/(id)/start` + +Start the container `id` **Example request**: @@ -411,10 +416,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Stop a container +### Stop a container - `POST /containers/`(*id*)`/stop` -: Stop the container `id` +`POST /containers/(id)/stop` + +Stop the container `id` **Example request**: @@ -436,10 +442,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Restart a container +### Restart a container - `POST /containers/`(*id*)`/restart` -: Restart the container `id` +`POST /containers/(id)/restart` + +Restart the container `id` **Example request**: @@ -461,10 +468,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Kill a container +### Kill a container - `POST /containers/`(*id*)`/kill` -: Kill the container `id` +`POST /containers/(id)/kill` + +Kill the container `id` **Example request**: @@ -480,10 +488,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Attach to a container +### Attach to a container - `POST /containers/`(*id*)`/attach` -: Attach to the container `id` +`POST /containers/(id)/attach` + +Attach to the container `id` **Example request**: @@ -521,9 +530,8 @@ page_keywords: API, Docker, rcli, REST, documentation **Stream details**: When using the TTY setting is enabled in - [`POST /containers/create` -](#post--containers-create "POST /containers/create"), the - stream is the raw data from the process PTY and client’s stdin. When + [`POST /containers/create`](#post--containers-create), the + stream is the raw data from the process PTY and client's stdin. When the TTY is disabled, then the stream is multiplexed to separate stdout and stderr. @@ -562,11 +570,11 @@ page_keywords: API, Docker, rcli, REST, documentation 4. Read the extracted size and output it on the correct output 5. 
Goto 1) -#### Wait a container +### Wait a container - `POST /containers/`(*id*)`/wait` -: Block until container `id` stops, then returns - the exit code +`POST /containers/(id)/wait` + +Block until container `id` stops, then returns the exit code **Example request**: @@ -585,10 +593,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Remove a container +### Remove a container - `DELETE /containers/`(*id*) -: Remove the container `id` from the filesystem +`DELETE /containers/(id)` + +Remove the container `id` from the filesystem **Example request**: @@ -612,10 +621,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Copy files or folders from a container +### Copy files or folders from a container - `POST /containers/`(*id*)`/copy` -: Copy files or folders of container `id` +`POST /containers/(id)/copy` + +Copy files or folders of container `id` **Example request**: @@ -639,12 +649,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -### 2.2 Images +## 2.2 Images -#### List Images +### List Images - `GET /images/json` -: **Example request**: +`GET /images/json` + +**Example request**: GET /images/json?all=0 HTTP/1.1 @@ -678,11 +689,11 @@ page_keywords: API, Docker, rcli, REST, documentation } ] -#### Create an image +### Create an image - `POST /images/create` -: Create an image, either by pull it from the registry or by importing - it +`POST /images/create` + +Create an image, either by pull it from the registry or by importing it **Example request**: @@ -723,11 +734,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Insert a file in an image +### Insert a file in an image - `POST /images/`(*name*)`/insert` -: Insert a file from `url` in the image - `name` at `path` +`POST /images/(name)/insert` + +Insert a file from `url` in the image `name` at `path` **Example request**: @@ -748,10 +759,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Inspect an image +### Inspect an image - `GET /images/`(*name*)`/json` -: Return low-level information on the image `name` +`GET /images/(name)/json` + +Return low-level information on the image `name` **Example request**: @@ -797,10 +809,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### Get the history of an image +### Get the history of an image - `GET /images/`(*name*)`/history` -: Return the history of the image `name` +`GET /images/(name)/history` + +Return the history of the image `name` **Example request**: @@ -830,10 +843,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### Push an image on the registry +### Push an image on the registry - `POST /images/`(*name*)`/push` -: Push the image `name` on the registry +`POST /images/(name)/push` + +Push the image `name` on the registry **Example request**: @@ -868,10 +882,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such image - **500** – server error -#### Tag an image into a repository +### Tag an image into a repository - `POST /images/`(*name*)`/tag` -: Tag the image `name` into a repository +`POST /images/(name)/tag` + +Tag the image `name` into a repository **Example request**: @@ -896,9 +911,9 @@ page_keywords: API, 
Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### Remove an image +### Remove an image - `DELETE /images/`(*name*) +`DELETE /images/(name*) : Remove the image `name` from the filesystem **Example request**: @@ -923,14 +938,15 @@ page_keywords: API, Docker, rcli, REST, documentation - **409** – conflict - **500** – server error -#### Search images +### Search images - `GET /images/search` -: Search for an image in the docker index. +`GET /images/search` + +Search for an image in the docker index. > **Note**: > The response keys have changed from API v1.6 to reflect the JSON -> sent by the registry server to the docker daemon’s request. +> sent by the registry server to the docker daemon's request. **Example request**: @@ -977,12 +993,13 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -### 2.3 Misc +## 2.3 Misc -#### Build an image from Dockerfile +### Build an image from Dockerfile - `POST /build` -: Build an image from Dockerfile using a POST body. +`POST /build` + +Build an image from Dockerfile using a POST body. **Example request**: @@ -1030,10 +1047,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Check auth configuration +### Check auth configuration - `POST /auth` -: Get the default username and email +`POST /auth` + +Get the default username and email **Example request**: @@ -1057,10 +1075,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **204** – no error - **500** – server error -#### Display system-wide information +### Display system-wide information - `GET /info` -: Display system-wide information +`GET /info` + +Display system-wide information **Example request**: @@ -1087,10 +1106,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Show the docker version information +### Show the docker version information - `GET /version` -: Show the docker version information +`GET /version` + +Show the docker version information **Example request**: @@ -1112,10 +1132,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Create a new image from a container’s changes +### Create a new image from a container's changes - `POST /commit` -: Create a new image from a container’s changes +`POST /commit` + +Create a new image from a container's changes **Example request**: @@ -1137,7 +1158,7 @@ page_keywords: API, Docker, rcli, REST, documentation - **tag** – tag - **m** – commit message - **author** – author (eg. "John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>") + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") - **run** – config automatically applied when the image is run. 
(ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) @@ -1147,11 +1168,12 @@ page_keywords: API, Docker, rcli, REST, documentation - **404** – no such container - **500** – server error -#### Monitor Docker’s events +### Monitor Docker's events - `GET /events` -: Get events from docker, either in real time via streaming, or via - polling (using since) +`GET /events` + +Get events from docker, either in real time via streaming, or via +polling (using since) **Example request**: @@ -1178,11 +1200,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Get a tarball containing all images and tags in a repository +### Get a tarball containing all images and tags in a repository - `GET /images/`(*name*)`/get` -: Get a tarball containing all images and metadata for the repository - specified by `name`. +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified by `name`. **Example request** @@ -1200,10 +1222,11 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -#### Load a tarball with a set of images and tags into docker +### Load a tarball with a set of images and tags into docker - `POST /images/load` -: Load a set of images and tags into the docker repository. +`POST /images/load` + +Load a set of images and tags into the docker repository. **Example request** @@ -1220,33 +1243,36 @@ page_keywords: API, Docker, rcli, REST, documentation - **200** – no error - **500** – server error -## 3. Going further +# 3. Going further -### 3.1 Inside ‘docker run’ +## 3.1 Inside `docker run` -Here are the steps of ‘docker run’ : +Here are the steps of `docker run` : -- Create the container + - Create the container -- If the status code is 404, it means the image doesn’t exists: - : - Try to pull it - - Then retry to create the container + - If the status code is 404, it means the image doesn't exists: -- Start the container + - Try to pull it + - Then retry to create the container -- If you are not in detached mode: - : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 + - Start the container -- If in detached mode or only stdin is attached: - : - Display the container’s id + - If you are not in detached mode: -### 3.2 Hijacking + - Attach to the container, using logs=1 (to have stdout and + - stderr from the container's start) and stream=1 + + - If in detached mode or only stdin is attached: + + - Display the container's id + +## 3.2 Hijacking In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. -### 3.3 CORS Requests +## 3.3 CORS Requests To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. diff --git a/docs/sources/reference/api/index_api.md b/docs/sources/reference/api/index_api.md index 8f98513cf5..161b3e0c71 100644 --- a/docs/sources/reference/api/index_api.md +++ b/docs/sources/reference/api/index_api.md @@ -14,11 +14,11 @@ page_keywords: API, Docker, index, REST, documentation ### Repositories -### User Repo +#### User Repo - `PUT /v1/repositories/`(*namespace*)`/`(*repo\_name*)`/` -: Create a user repository with the given `namespace` - and `repo_name`. +`PUT /v1/repositories/(namespace)/(repo_name)/` + +Create a user repository with the given `namespace` and `repo_name`. 
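For readers who prefer to see this call from a client's point of view, here is a hypothetical Go sketch of the request. The index host, credentials, namespace, repository name, and image id are placeholders rather than values taken from this document, and the raw HTTP example below remains the authoritative description.

    package main

    import (
        "bytes"
        "fmt"
        "log"
        "net/http"
    )

    func main() {
        // Placeholders only: substitute a real index host, credentials,
        // namespace/repo_name and image id.
        indexURL := "https://index.docker.io"
        namespace, repoName := "foo", "bar"

        // The body lists the images about to be pushed; their checksums are
        // only filled in at the end of the push.
        body := bytes.NewBufferString(`[{"id": "0123456789abcdef"}]`)

        url := fmt.Sprintf("%s/v1/repositories/%s/%s/", indexURL, namespace, repoName)
        req, err := http.NewRequest("PUT", url, body)
        if err != nil {
            log.Fatal(err)
        }
        req.SetBasicAuth("myuser", "mypassword")
        req.Header.Set("Content-Type", "application/json")
        req.Header.Set("Accept", "application/json")
        // Ask the index for a token to present to the registry afterwards.
        req.Header.Set("X-Docker-Token", "true")

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        fmt.Println("status:", resp.Status)
        fmt.Println("token:", resp.Header.Get("WWW-Authenticate"))
        fmt.Println("registry endpoints:", resp.Header.Get("X-Docker-Endpoints"))
    }

Sending `X-Docker-Token: true` mirrors the push workflow in the registry & index spec, where the index answers with a token in `WWW-Authenticate` and the registry locations in `X-Docker-Endpoints`.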
**Example Request**: @@ -34,7 +34,7 @@ page_keywords: API, Docker, index, REST, documentation Parameters: - **namespace** – the namespace for the repo - - **repo\_name** – the name for the repo + - **repo_name** – the name for the repo **Example Response**: @@ -54,9 +54,9 @@ page_keywords: API, Docker, index, REST, documentation - **401** – Unauthorized - **403** – Account is not Active - `DELETE /v1/repositories/`(*namespace*)`/`(*repo\_name*)`/` -: Delete a user repository with the given `namespace` - and `repo_name`. +`DELETE /v1/repositories/(namespace)/(repo_name)/` + +Delete a user repository with the given `namespace` and `repo_name`. **Example Request**: @@ -72,7 +72,7 @@ page_keywords: API, Docker, index, REST, documentation Parameters: - **namespace** – the namespace for the repo - - **repo\_name** – the name for the repo + - **repo_name** – the name for the repo **Example Response**: @@ -93,12 +93,12 @@ page_keywords: API, Docker, index, REST, documentation - **401** – Unauthorized - **403** – Account is not Active -### Library Repo +#### Library Repo - `PUT /v1/repositories/`(*repo\_name*)`/` -: Create a library repository with the given `repo_name` -. This is a restricted feature only available to docker - admins. +`PUT /v1/repositories/(repo_name)/` + +Create a library repository with the given `repo_name`. +This is a restricted feature only available to docker admins. When namespace is missing, it is assumed to be `library` @@ -116,7 +116,7 @@ page_keywords: API, Docker, index, REST, documentation Parameters: - - **repo\_name** – the library name for the repo + - **repo_name** – the library name for the repo **Example Response**: @@ -136,10 +136,10 @@ page_keywords: API, Docker, index, REST, documentation - **401** – Unauthorized - **403** – Account is not Active - `DELETE /v1/repositories/`(*repo\_name*)`/` -: Delete a library repository with the given `repo_name` -. This is a restricted feature only available to docker - admins. +`DELETE /v1/repositories/(repo_name)/` + +Delete a library repository with the given `repo_name`. +This is a restricted feature only available to docker admins. When namespace is missing, it is assumed to be `library` @@ -157,7 +157,7 @@ page_keywords: API, Docker, index, REST, documentation Parameters: - - **repo\_name** – the library name for the repo + - **repo_name** – the library name for the repo **Example Response**: @@ -180,10 +180,11 @@ page_keywords: API, Docker, index, REST, documentation ### Repository Images -### User Repo Images +#### User Repo Images - `PUT /v1/repositories/`(*namespace*)`/`(*repo\_name*)`/images` -: Update the images for a user repo. +`PUT /v1/repositories/(namespace)/(repo_name)/images` + +Update the images for a user repo. **Example Request**: @@ -199,7 +200,7 @@ page_keywords: API, Docker, index, REST, documentation Parameters: - **namespace** – the namespace for the repo - - **repo\_name** – the name for the repo + - **repo_name** – the name for the repo **Example Response**: @@ -216,8 +217,9 @@ page_keywords: API, Docker, index, REST, documentation - **401** – Unauthorized - **403** – Account is not Active or permission denied - `GET /v1/repositories/`(*namespace*)`/`(*repo\_name*)`/images` -: get the images for a user repo. +`GET /v1/repositories/(namespace)/(repo_name)/images` + +Get the images for a user repo. 
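As a complement to the raw HTTP example that follows, the hypothetical Go sketch below decodes the image list this endpoint returns (an array of image ids and checksums, as shown in the example responses elsewhere in this document). The repository name is a placeholder; a plain unauthenticated GET like this only works for public repositories, while private ones need Basic auth and the token flow described in the registry & index spec.

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
        "net/http"
    )

    // imageRef mirrors the entries returned by the index: an image id plus,
    // once a push has completed, its checksum.
    type imageRef struct {
        ID       string `json:"id"`
        Checksum string `json:"checksum"`
    }

    func main() {
        // Placeholder repository; substitute a real namespace/repo_name.
        url := "https://index.docker.io/v1/repositories/foo/bar/images"

        resp, err := http.Get(url)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            log.Fatalf("unexpected status: %s", resp.Status)
        }

        var images []imageRef
        if err := json.NewDecoder(resp.Body).Decode(&images); err != nil {
            log.Fatal(err)
        }
        for _, img := range images {
            fmt.Printf("image %s checksum %s\n", img.ID, img.Checksum)
        }
    }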
**Example Request**: @@ -228,7 +230,7 @@ page_keywords: API, Docker, index, REST, documentation Parameters: - **namespace** – the namespace for the repo - - **repo\_name** – the name for the repo + - **repo_name** – the name for the repo **Example Response**: @@ -246,10 +248,11 @@ page_keywords: API, Docker, index, REST, documentation - **200** – OK - **404** – Not found -### Library Repo Images +#### Library Repo Images - `PUT /v1/repositories/`(*repo\_name*)`/images` -: Update the images for a library repo. +`PUT /v1/repositories/(repo_name)/images` + +Update the images for a library repo. **Example Request**: @@ -264,7 +267,7 @@ page_keywords: API, Docker, index, REST, documentation Parameters: - - **repo\_name** – the library name for the repo + - **repo_name** – the library name for the repo **Example Response**: @@ -281,8 +284,9 @@ page_keywords: API, Docker, index, REST, documentation - **401** – Unauthorized - **403** – Account is not Active or permission denied - `GET /v1/repositories/`(*repo\_name*)`/images` -: get the images for a library repo. +`GET /v1/repositories/(repo_name)/images` + +Get the images for a library repo. **Example Request**: @@ -292,7 +296,7 @@ page_keywords: API, Docker, index, REST, documentation Parameters: - - **repo\_name** – the library name for the repo + - **repo_name** – the library name for the repo **Example Response**: @@ -312,10 +316,11 @@ page_keywords: API, Docker, index, REST, documentation ### Repository Authorization -### Library Repo +#### Library Repo - `PUT /v1/repositories/`(*repo\_name*)`/auth` -: authorize a token for a library repo +`PUT /v1/repositories/(repo_name)/auth` + +Authorize a token for a library repo **Example Request**: @@ -326,7 +331,7 @@ page_keywords: API, Docker, index, REST, documentation Parameters: - - **repo\_name** – the library name for the repo + - **repo_name** – the library name for the repo **Example Response**: @@ -342,10 +347,11 @@ page_keywords: API, Docker, index, REST, documentation - **403** – Permission denied - **404** – Not found -### User Repo +#### User Repo - `PUT /v1/repositories/`(*namespace*)`/`(*repo\_name*)`/auth` -: authorize a token for a user repo +`PUT /v1/repositories/(namespace)/(repo_name)/auth` + +Authorize a token for a user repo **Example Request**: @@ -357,7 +363,7 @@ page_keywords: API, Docker, index, REST, documentation Parameters: - **namespace** – the namespace for the repo - - **repo\_name** – the name for the repo + - **repo_name** – the name for the repo **Example Response**: @@ -375,10 +381,11 @@ page_keywords: API, Docker, index, REST, documentation ### Users -### User Login +#### User Login - `GET /v1/users` -: If you want to check your login, you can try this endpoint +`GET /v1/users` + +If you want to check your login, you can try this endpoint **Example Request**: @@ -401,10 +408,11 @@ page_keywords: API, Docker, index, REST, documentation - **401** – Unauthorized - **403** – Account is not Active -### User Register +#### User Register - `POST /v1/users` -: Registering a new account. +`POST /v1/users` + +Registering a new account. **Example request**: @@ -423,7 +431,7 @@ page_keywords: API, Docker, index, REST, documentation - **email** – valid email address, that needs to be confirmed - **username** – min 4 character, max 30 characters, must match - the regular expression [a-z0-9\_]. + the regular expression [a-z0-9_]. 
- **password** – min 5 characters **Example Response**: @@ -439,10 +447,12 @@ page_keywords: API, Docker, index, REST, documentation - **201** – User Created - **400** – Errors (invalid json, missing or invalid fields, etc) -### Update User +#### Update User + +`PUT /v1/users/(username)/` + +Change a password or email address for given user. If you pass in an - `PUT /v1/users/`(*username*)`/` -: Change a password or email address for given user. If you pass in an email, it will add it to your account, it will not remove the old one. Passwords will be updated. @@ -487,8 +497,10 @@ If you need to search the index, this is the endpoint you would use. ### Search - `GET /v1/search` -: Search the Index given a search term. It accepts +`GET /v1/search` + +Search the Index given a search term. It accepts + [GET](http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.3) only. @@ -521,5 +533,3 @@ If you need to search the index, this is the endpoint you would use. - **200** – no error - **500** – server error - - diff --git a/docs/sources/reference/api/registry_api.md b/docs/sources/reference/api/registry_api.md index 09035515f5..f8bdd6657d 100644 --- a/docs/sources/reference/api/registry_api.md +++ b/docs/sources/reference/api/registry_api.md @@ -6,51 +6,51 @@ page_keywords: API, Docker, index, registry, REST, documentation ## Introduction -- This is the REST API for the Docker Registry -- It stores the images and the graph for a set of repositories -- It does not have user accounts data -- It has no notion of user accounts or authorization -- It delegates authentication and authorization to the Index Auth - service using tokens -- It supports different storage backends (S3, cloud files, local FS) -- It doesn’t have a local database -- It will be open-sourced at some point + - This is the REST API for the Docker Registry + - It stores the images and the graph for a set of repositories + - It does not have user accounts data + - It has no notion of user accounts or authorization + - It delegates authentication and authorization to the Index Auth + service using tokens + - It supports different storage backends (S3, cloud files, local FS) + - It doesn't have a local database + - It will be open-sourced at some point We expect that there will be multiple registries out there. To help to grasp the context, here are some examples of registries: -- **sponsor registry**: such a registry is provided by a third-party - hosting infrastructure as a convenience for their customers and the - docker community as a whole. Its costs are supported by the third - party, but the management and operation of the registry are - supported by dotCloud. It features read/write access, and delegates - authentication and authorization to the Index. -- **mirror registry**: such a registry is provided by a third-party - hosting infrastructure but is targeted at their customers only. Some - mechanism (unspecified to date) ensures that public images are - pulled from a sponsor registry to the mirror registry, to make sure - that the customers of the third-party provider can “docker pull” - those images locally. -- **vendor registry**: such a registry is provided by a software - vendor, who wants to distribute docker images. It would be operated - and managed by the vendor. Only users authorized by the vendor would - be able to get write access. Some images would be public (accessible - for anyone), others private (accessible only for authorized users). - Authentication and authorization would be delegated to the Index. 
- The goal of vendor registries is to let someone do “docker pull - basho/riak1.3” and automatically push from the vendor registry - (instead of a sponsor registry); i.e. get all the convenience of a - sponsor registry, while retaining control on the asset distribution. -- **private registry**: such a registry is located behind a firewall, - or protected by an additional security layer (HTTP authorization, - SSL client-side certificates, IP address authorization...). The - registry is operated by a private entity, outside of dotCloud’s - control. It can optionally delegate additional authorization to the - Index, but it is not mandatory. + - **sponsor registry**: such a registry is provided by a third-party + hosting infrastructure as a convenience for their customers and the + docker community as a whole. Its costs are supported by the third + party, but the management and operation of the registry are + supported by dotCloud. It features read/write access, and delegates + authentication and authorization to the Index. + - **mirror registry**: such a registry is provided by a third-party + hosting infrastructure but is targeted at their customers only. Some + mechanism (unspecified to date) ensures that public images are + pulled from a sponsor registry to the mirror registry, to make sure + that the customers of the third-party provider can “docker pull” + those images locally. + - **vendor registry**: such a registry is provided by a software + vendor, who wants to distribute docker images. It would be operated + and managed by the vendor. Only users authorized by the vendor would + be able to get write access. Some images would be public (accessible + for anyone), others private (accessible only for authorized users). + Authentication and authorization would be delegated to the Index. + The goal of vendor registries is to let someone do “docker pull + basho/riak1.3” and automatically push from the vendor registry + (instead of a sponsor registry); i.e. get all the convenience of a + sponsor registry, while retaining control on the asset distribution. + - **private registry**: such a registry is located behind a firewall, + or protected by an additional security layer (HTTP authorization, + SSL client-side certificates, IP address authorization...). The + registry is operated by a private entity, outside of dotCloud's + control. It can optionally delegate additional authorization to the + Index, but it is not mandatory. > **Note**: > Mirror registries and private registries which do not use the Index -> don’t even need to run the registry code. They can be implemented by any +> don't even need to run the registry code. They can be implemented by any > kind of transport implementing HTTP GET and PUT. Read-only registries > can be powered by a simple static HTTP server. @@ -63,19 +63,19 @@ grasp the context, here are some examples of registries: > - remote docker addressed through SSH. The latter would only require two new commands in docker, e.g. -`registryget` and `registryput`, -wrapping access to the local filesystem (and optionally doing -consistency checks). Authentication and authorization are then delegated -to SSH (e.g. with public keys). +`registryget` and `registryput`, wrapping access to the local filesystem +(and optionally doing consistency checks). Authentication and authorization +are then delegated to SSH (e.g. with public keys). 
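To make the read-only case above more concrete (a registry that is nothing more than a static HTTP server exposing the `/v1` layout), here is a hypothetical Go sketch that resolves a tag and downloads the corresponding image using the endpoints documented in the sections that follow. The base URL, repository, and tag are placeholders; a complete client would also walk the image's parents, verify checksums, and untar the layer onto the graph instead of discarding it.

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "log"
        "net/http"
        "os"
    )

    // get fetches a registry path and fails on any non-200 status.
    func get(base, path string) (io.ReadCloser, error) {
        resp, err := http.Get(base + path)
        if err != nil {
            return nil, err
        }
        if resp.StatusCode != http.StatusOK {
            resp.Body.Close()
            return nil, fmt.Errorf("GET %s: %s", path, resp.Status)
        }
        return resp.Body, nil
    }

    func main() {
        // Placeholders: any server (even a static file tree) exposing these paths will do.
        base := "https://registry.example.com"
        repo := "foo/bar"

        // Resolve the "latest" tag to an image id (returned as a JSON string).
        body, err := get(base, "/v1/repositories/"+repo+"/tags/latest")
        if err != nil {
            log.Fatal(err)
        }
        var imageID string
        if err := json.NewDecoder(body).Decode(&imageID); err != nil {
            log.Fatal(err)
        }
        body.Close()

        // Fetch the image metadata...
        meta, err := get(base, "/v1/images/"+imageID+"/json")
        if err != nil {
            log.Fatal(err)
        }
        io.Copy(os.Stdout, meta) // a real client would parse this JSON
        meta.Close()

        // ...and the layer tarball itself.
        layer, err := get(base, "/v1/images/"+imageID+"/layer")
        if err != nil {
            log.Fatal(err)
        }
        defer layer.Close()
        n, _ := io.Copy(io.Discard, layer) // a real client would untar this onto the graph
        fmt.Printf("image %s: layer is %d bytes\n", imageID, n)
    }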
-## Endpoints +# Endpoints -### Images +## Images ### Layer - `GET /v1/images/`(*image\_id*)`/layer` -: get image layer for a given `image_id` +`GET /v1/images/(image_id)/layer` + +Get image layer for a given `image_id` **Example Request**: @@ -87,7 +87,7 @@ to SSH (e.g. with public keys). Parameters: - - **image\_id** – the id for the layer you want to get + - **image_id** – the id for the layer you want to get **Example Response**: @@ -104,8 +104,9 @@ to SSH (e.g. with public keys). - **401** – Requires authorization - **404** – Image not found - `PUT /v1/images/`(*image\_id*)`/layer` -: put image layer for a given `image_id` +`PUT /v1/images/(image_id)/layer` + +Put image layer for a given `image_id` **Example Request**: @@ -118,7 +119,7 @@ to SSH (e.g. with public keys). Parameters: - - **image\_id** – the id for the layer you want to get + - **image_id** – the id for the layer you want to get **Example Response**: @@ -135,10 +136,11 @@ to SSH (e.g. with public keys). - **401** – Requires authorization - **404** – Image not found -### Image +## Image - `PUT /v1/images/`(*image\_id*)`/json` -: put image for a given `image_id` +`PUT /v1/images/(image_id)/json` + +Put image for a given `image_id` **Example Request**: @@ -181,7 +183,7 @@ to SSH (e.g. with public keys). Parameters: - - **image\_id** – the id for the layer you want to get + - **image_id** – the id for the layer you want to get **Example Response**: @@ -197,8 +199,9 @@ to SSH (e.g. with public keys). - **200** – OK - **401** – Requires authorization - `GET /v1/images/`(*image\_id*)`/json` -: get image for a given `image_id` +`GET /v1/images/(image_id)/json` + +Get image for a given `image_id` **Example Request**: @@ -210,7 +213,7 @@ to SSH (e.g. with public keys). Parameters: - - **image\_id** – the id for the layer you want to get + - **image_id** – the id for the layer you want to get **Example Response**: @@ -258,10 +261,11 @@ to SSH (e.g. with public keys). - **401** – Requires authorization - **404** – Image not found -### Ancestry +## Ancestry - `GET /v1/images/`(*image\_id*)`/ancestry` -: get ancestry for an image given an `image_id` +`GET /v1/images/(image_id)/ancestry` + +Get ancestry for an image given an `image_id` **Example Request**: @@ -273,7 +277,7 @@ to SSH (e.g. with public keys). Parameters: - - **image\_id** – the id for the layer you want to get + - **image_id** – the id for the layer you want to get **Example Response**: @@ -293,10 +297,11 @@ to SSH (e.g. with public keys). - **401** – Requires authorization - **404** – Image not found -### Tags +## Tags - `GET /v1/repositories/`(*namespace*)`/`(*repository*)`/tags` -: get all of the tags for the given repo. +`GET /v1/repositories/(namespace)/(repository)/tags` + +Get all of the tags for the given repo. **Example Request**: @@ -330,8 +335,9 @@ to SSH (e.g. with public keys). - **401** – Requires authorization - **404** – Repository not found - `GET /v1/repositories/`(*namespace*)`/`(*repository*)`/tags/`(*tag*) -: get a tag for the given repo. +`GET /v1/repositories/(namespace)/(repository)/tags/(tag*): + +Get a tag for the given repo. **Example Request**: @@ -363,8 +369,9 @@ to SSH (e.g. with public keys). - **401** – Requires authorization - **404** – Tag not found - `DELETE /v1/repositories/`(*namespace*)`/`(*repository*)`/tags/`(*tag*) -: delete the tag for the repo +`DELETE /v1/repositories/(namespace)/(repository)/tags/(tag*): + +Delete the tag for the repo **Example Request**: @@ -395,8 +402,9 @@ to SSH (e.g. with public keys). 
- **401** – Requires authorization - **404** – Tag not found - `PUT /v1/repositories/`(*namespace*)`/`(*repository*)`/tags/`(*tag*) -: put a tag for the given repo. +`PUT /v1/repositories/(namespace)/(repository)/tags/(tag*): + +Put a tag for the given repo. **Example Request**: @@ -430,10 +438,11 @@ to SSH (e.g. with public keys). - **401** – Requires authorization - **404** – Image not found -### Repositories +## Repositories - `DELETE /v1/repositories/`(*namespace*)`/`(*repository*)`/` -: delete a repository +`DELETE /v1/repositories/(namespace)/(repository)/` + +Delete a repository **Example Request**: @@ -465,11 +474,12 @@ to SSH (e.g. with public keys). - **401** – Requires authorization - **404** – Repository not found -### Status +## Status - `GET /v1/_ping` -: Check status of the registry. This endpoint is also used to - determine if the registry supports SSL. +`GET /v1/_ping` + +Check status of the registry. This endpoint is also used to +determine if the registry supports SSL. **Example Request**: diff --git a/docs/sources/reference/api/registry_index_spec.md b/docs/sources/reference/api/registry_index_spec.md index aa18a2e3c5..ab775b2237 100644 --- a/docs/sources/reference/api/registry_index_spec.md +++ b/docs/sources/reference/api/registry_index_spec.md @@ -10,16 +10,16 @@ page_keywords: docker, registry, api, index The Index is responsible for centralizing information about: -- User accounts -- Checksums of the images -- Public namespaces + - User accounts + - Checksums of the images + - Public namespaces The Index has different components: -- Web UI -- Meta-data store (comments, stars, list public repositories) -- Authentication service -- Tokenization + - Web UI + - Meta-data store (comments, stars, list public repositories) + - Authentication service + - Tokenization The index is authoritative for those information. @@ -28,46 +28,46 @@ managed by Docker Inc. ### Registry -- It stores the images and the graph for a set of repositories -- It does not have user accounts data -- It has no notion of user accounts or authorization -- It delegates authentication and authorization to the Index Auth - service using tokens -- It supports different storage backends (S3, cloud files, local FS) -- It doesn’t have a local database -- [Source Code](https://github.com/dotcloud/docker-registry) + - It stores the images and the graph for a set of repositories + - It does not have user accounts data + - It has no notion of user accounts or authorization + - It delegates authentication and authorization to the Index Auth + service using tokens + - It supports different storage backends (S3, cloud files, local FS) + - It doesn't have a local database + - [Source Code](https://github.com/dotcloud/docker-registry) We expect that there will be multiple registries out there. To help to grasp the context, here are some examples of registries: -- **sponsor registry**: such a registry is provided by a third-party - hosting infrastructure as a convenience for their customers and the - docker community as a whole. Its costs are supported by the third - party, but the management and operation of the registry are - supported by dotCloud. It features read/write access, and delegates - authentication and authorization to the Index. -- **mirror registry**: such a registry is provided by a third-party - hosting infrastructure but is targeted at their customers only. 
Some - mechanism (unspecified to date) ensures that public images are - pulled from a sponsor registry to the mirror registry, to make sure - that the customers of the third-party provider can “docker pull” - those images locally. -- **vendor registry**: such a registry is provided by a software - vendor, who wants to distribute docker images. It would be operated - and managed by the vendor. Only users authorized by the vendor would - be able to get write access. Some images would be public (accessible - for anyone), others private (accessible only for authorized users). - Authentication and authorization would be delegated to the Index. - The goal of vendor registries is to let someone do “docker pull - basho/riak1.3” and automatically push from the vendor registry - (instead of a sponsor registry); i.e. get all the convenience of a - sponsor registry, while retaining control on the asset distribution. -- **private registry**: such a registry is located behind a firewall, - or protected by an additional security layer (HTTP authorization, - SSL client-side certificates, IP address authorization...). The - registry is operated by a private entity, outside of dotCloud’s - control. It can optionally delegate additional authorization to the - Index, but it is not mandatory. + - **sponsor registry**: such a registry is provided by a third-party + hosting infrastructure as a convenience for their customers and the + docker community as a whole. Its costs are supported by the third + party, but the management and operation of the registry are + supported by dotCloud. It features read/write access, and delegates + authentication and authorization to the Index. + - **mirror registry**: such a registry is provided by a third-party + hosting infrastructure but is targeted at their customers only. Some + mechanism (unspecified to date) ensures that public images are + pulled from a sponsor registry to the mirror registry, to make sure + that the customers of the third-party provider can “docker pull” + those images locally. + - **vendor registry**: such a registry is provided by a software + vendor, who wants to distribute docker images. It would be operated + and managed by the vendor. Only users authorized by the vendor would + be able to get write access. Some images would be public (accessible + for anyone), others private (accessible only for authorized users). + Authentication and authorization would be delegated to the Index. + The goal of vendor registries is to let someone do “docker pull + basho/riak1.3” and automatically push from the vendor registry + (instead of a sponsor registry); i.e. get all the convenience of a + sponsor registry, while retaining control on the asset distribution. + - **private registry**: such a registry is located behind a firewall, + or protected by an additional security layer (HTTP authorization, + SSL client-side certificates, IP address authorization...). The + registry is operated by a private entity, outside of dotCloud's + control. It can optionally delegate additional authorization to the + Index, but it is not mandatory. > **Note:** The latter implies that while HTTP is the protocol > of choice for a registry, multiple schemes are possible (and @@ -88,36 +88,33 @@ to SSH (e.g. with public keys). On top of being a runtime for LXC, Docker is the Registry client. 
It supports: -- Push / Pull on the registry -- Client authentication on the Index + - Push / Pull on the registry + - Client authentication on the Index ## Workflow ### Pull -![](../../../_images/docker_pull_chart.png) +![](../../../static_files/docker_pull_chart.png) 1. Contact the Index to know where I should download “samalba/busybox” -2. Index replies: a. `samalba/busybox` is on - Registry A b. here are the checksums for `samalba/busybox` - (for all layers) c. token -3. Contact Registry A to receive the layers for - `samalba/busybox` (all of them to the base - image). Registry A is authoritative for “samalba/busybox” but keeps - a copy of all inherited layers and serve them all from the same +2. Index replies: a. `samalba/busybox` is on Registry A b. here are the + checksums for `samalba/busybox` (for all layers) c. token +3. Contact Registry A to receive the layers for `samalba/busybox` (all of + them to the base image). Registry A is authoritative for “samalba/busybox” + but keeps a copy of all inherited layers and serve them all from the same location. -4. registry contacts index to verify if token/user is allowed to - download images -5. Index returns true/false lettings registry know if it should proceed - or error out +4. registry contacts index to verify if token/user is allowed to download images +5. Index returns true/false lettings registry know if it should proceed or error + out 6. Get the payload for all layers -It’s possible to run: +It's possible to run: docker pull https:///repositories/samalba/busybox In this case, Docker bypasses the Index. However the security is not -guaranteed (in case Registry A is corrupted) because there won’t be any +guaranteed (in case Registry A is corrupted) because there won't be any checksum checks. Currently registry redirects to s3 urls for downloads, going forward all @@ -128,60 +125,61 @@ sub-classes for S3 and local storage. Token is only returned when the `X-Docker-Token` header is sent with request. -Basic Auth is required to pull private repos. Basic auth isn’t required +Basic Auth is required to pull private repos. Basic auth isn't required for pulling public repos, but if one is provided, it needs to be valid and for an active account. -#### API (pulling repository foo/bar): +**API (pulling repository foo/bar):** -1. (Docker -\> Index) GET /v1/repositories/foo/bar/images - : **Headers**: - : Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== - X-Docker-Token: true +1. (Docker -> Index) GET /v1/repositories/foo/bar/images: + + **Headers**: + Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== + X-Docker-Token: true + + **Action**: + (looking up the foo/bar in db and gets images and checksums + for that repo (all if no tag is specified, if tag, only + checksums for those tags) see part 4.4.1) + +2. (Index -> Docker) HTTP 200 OK + + **Headers**: + Authorization: Token + signature=123abc,repository=”foo/bar”,access=write + X-Docker-Endpoints: registry.docker.io [,registry2.docker.io] + + **Body**: + Jsonified checksums (see part 4.4.1) + +3. (Docker -> Registry) GET /v1/repositories/foo/bar/tags/latest + + **Headers**: + Authorization: Token + signature=123abc,repository=”foo/bar”,access=write + +4. (Registry -> Index) GET /v1/repositories/foo/bar/images + + **Headers**: + Authorization: Token + signature=123abc,repository=”foo/bar”,access=read + + **Body**: + + + **Action**: + (Lookup token see if they have access to pull.) + + If good: + HTTP 200 OK Index will invalidate the token + + If bad: + HTTP 401 Unauthorized + +5. 
(Docker -> Registry) GET /v1/images/928374982374/ancestry **Action**: - : (looking up the foo/bar in db and gets images and checksums - for that repo (all if no tag is specified, if tag, only - checksums for those tags) see part 4.4.1) - -2. (Index -\> Docker) HTTP 200 OK - - > **Headers**: - > : - Authorization: Token - > signature=123abc,repository=”foo/bar”,access=write - > - X-Docker-Endpoints: registry.docker.io [, - > registry2.docker.io] - > - > **Body**: - > : Jsonified checksums (see part 4.4.1) - > -3. (Docker -\> Registry) GET /v1/repositories/foo/bar/tags/latest - : **Headers**: - : Authorization: Token - signature=123abc,repository=”foo/bar”,access=write - -4. (Registry -\> Index) GET /v1/repositories/foo/bar/images - - > **Headers**: - > : Authorization: Token - > signature=123abc,repository=”foo/bar”,access=read - > - > **Body**: - > : \ - > - > **Action**: - > : ( Lookup token see if they have access to pull.) - > - > If good: - > : HTTP 200 OK Index will invalidate the token - > - > If bad: - > : HTTP 401 Unauthorized - > -5. (Docker -\> Registry) GET /v1/images/928374982374/ancestry - : **Action**: - : (for each image id returned in the registry, fetch /json + - /layer) + (for each image id returned in the registry, fetch /json + /layer) > **Note**: > If someone makes a second request, then we will always give a new token, @@ -189,7 +187,7 @@ and for an active account. ### Push -![](../../../_images/docker_push_chart.png) +![](../../../static_files/docker_push_chart.png) 1. Contact the index to allocate the repository name “samalba/busybox” (authentication required with user credentials) @@ -204,7 +202,7 @@ and for an active account. 6. docker contacts the index to give checksums for upload images > **Note:** -> **It’s possible not to use the Index at all!** In this case, a deployed +> **It's possible not to use the Index at all!** In this case, a deployed > version of the Registry is deployed to store and serve images. Those > images are not authenticated and the security is not guaranteed. @@ -218,89 +216,96 @@ the push. When a repository name does not have checksums on the Index, it means that the push is in progress (since checksums are submitted at the end). -#### API (pushing repos foo/bar): +**API (pushing repos foo/bar):** -1. (Docker -\> Index) PUT /v1/repositories/foo/bar/ - : **Headers**: - : Authorization: Basic sdkjfskdjfhsdkjfh== X-Docker-Token: - true +1. (Docker -> Index) PUT /v1/repositories/foo/bar/ - **Action**:: - : - in index, we allocated a new repository, and set to - initialized + **Headers**: + Authorization: Basic sdkjfskdjfhsdkjfh== X-Docker-Token: + true - **Body**:: - : (The body contains the list of images that are going to be - pushed, with empty checksums. The checksums will be set at - the end of the push): - - [{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”}] - -2. (Index -\> Docker) 200 Created - : **Headers**: - : - WWW-Authenticate: Token - signature=123abc,repository=”foo/bar”,access=write - - X-Docker-Endpoints: registry.docker.io [, - registry2.docker.io] - -3. (Docker -\> Registry) PUT /v1/images/98765432\_parent/json - : **Headers**: - : Authorization: Token - signature=123abc,repository=”foo/bar”,access=write - -4. (Registry-\>Index) GET /v1/repositories/foo/bar/images - : **Headers**: - : Authorization: Token - signature=123abc,repository=”foo/bar”,access=write - - **Action**:: - : - Index: - : will invalidate the token. 
- - - Registry: - : grants a session (if token is approved) and fetches - the images id - -5. (Docker -\> Registry) PUT /v1/images/98765432\_parent/json - : **Headers**:: - : - Authorization: Token - signature=123abc,repository=”foo/bar”,access=write - - Cookie: (Cookie provided by the Registry) - -6. (Docker -\> Registry) PUT /v1/images/98765432/json - : **Headers**: - : Cookie: (Cookie provided by the Registry) - -7. (Docker -\> Registry) PUT /v1/images/98765432\_parent/layer - : **Headers**: - : Cookie: (Cookie provided by the Registry) - -8. (Docker -\> Registry) PUT /v1/images/98765432/layer - : **Headers**: - : X-Docker-Checksum: sha256:436745873465fdjkhdfjkgh - -9. (Docker -\> Registry) PUT /v1/repositories/foo/bar/tags/latest - : **Headers**: - : Cookie: (Cookie provided by the Registry) + **Action**: + - in index, we allocated a new repository, and set to + initialized **Body**: - : “98765432” + (The body contains the list of images that are going to be + pushed, with empty checksums. The checksums will be set at + the end of the push): -10. (Docker -\> Index) PUT /v1/repositories/foo/bar/images + [{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”}] - **Headers**: - : Authorization: Basic 123oislifjsldfj== X-Docker-Endpoints: +2. (Index -> Docker) 200 Created + + **Headers**: + - WWW-Authenticate: Token + signature=123abc,repository=”foo/bar”,access=write + - X-Docker-Endpoints: registry.docker.io [, + registry2.docker.io] + +3. (Docker -> Registry) PUT /v1/images/98765432_parent/json + + **Headers**: + Authorization: Token + signature=123abc,repository=”foo/bar”,access=write + +4. (Registry->Index) GET /v1/repositories/foo/bar/images + + **Headers**: + Authorization: Token + signature=123abc,repository=”foo/bar”,access=write + + **Action**: + - Index: + will invalidate the token. + - Registry: + grants a session (if token is approved) and fetches + the images id + +5. (Docker -> Registry) PUT /v1/images/98765432_parent/json + + **Headers**:: + - Authorization: Token + signature=123abc,repository=”foo/bar”,access=write + - Cookie: (Cookie provided by the Registry) + +6. (Docker -> Registry) PUT /v1/images/98765432/json + + **Headers**: + - Cookie: (Cookie provided by the Registry) + +7. (Docker -> Registry) PUT /v1/images/98765432_parent/layer + + **Headers**: + - Cookie: (Cookie provided by the Registry) + +8. (Docker -> Registry) PUT /v1/images/98765432/layer + + **Headers**: + X-Docker-Checksum: sha256:436745873465fdjkhdfjkgh + +9. (Docker -> Registry) PUT /v1/repositories/foo/bar/tags/latest + + **Headers**: + - Cookie: (Cookie provided by the Registry) + + **Body**: + “98765432” + +10. (Docker -> Index) PUT /v1/repositories/foo/bar/images + + **Headers**: + Authorization: Basic 123oislifjsldfj== X-Docker-Endpoints: registry1.docker.io (no validation on this right now) - **Body**: - : (The image, id’s, tags and checksums) - + **Body**: + (The image, id`s, tags and checksums) [{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”, “checksum”: “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”}] - **Return** HTTP 204 + **Return**: HTTP 204 > **Note:** If push fails and they need to start again, what happens in the index, > there will already be a record for the namespace/name, but it will be @@ -308,8 +313,8 @@ the end). > case could be if someone pushes the same thing at the same time with two > different shells. -If it’s a retry on the Registry, Docker has a cookie (provided by the -registry after token validation). 
So the Index won’t have to provide a +If it's a retry on the Registry, Docker has a cookie (provided by the +registry after token validation). So the Index won't have to provide a new token. ### Delete @@ -318,11 +323,9 @@ If you need to delete something from the index or registry, we need a nice clean way to do that. Here is the workflow. 1. Docker contacts the index to request a delete of a repository - `samalba/busybox` (authentication required with - user credentials) -2. If authentication works and repository is valid, - `samalba/busybox` is marked as deleted and a - temporary token is returned + `samalba/busybox` (authentication required with user credentials) +2. If authentication works and repository is valid, `samalba/busybox` + is marked as deleted and a temporary token is returned 3. Send a delete request to the registry for the repository (along with the token) 4. Registry A contacts the Index to verify the token (token must @@ -334,74 +337,79 @@ nice clean way to do that. Here is the workflow. > **Note**: > The Docker client should present an "Are you sure?" prompt to confirm -> the deletion before starting the process. Once it starts it can’t be +> the deletion before starting the process. Once it starts it can't be > undone. -#### API (deleting repository foo/bar): +**API (deleting repository foo/bar):** -1. (Docker -\> Index) DELETE /v1/repositories/foo/bar/ - : **Headers**: - : Authorization: Basic sdkjfskdjfhsdkjfh== X-Docker-Token: - true +1. (Docker -> Index) DELETE /v1/repositories/foo/bar/ - **Action**:: - : - in index, we make sure it is a valid repository, and set - to deleted (logically) + **Headers**: + Authorization: Basic sdkjfskdjfhsdkjfh== X-Docker-Token: + true - **Body**:: - : Empty + **Action**: + - in index, we make sure it is a valid repository, and set + to deleted (logically) -2. (Index -\> Docker) 202 Accepted - : **Headers**: - : - WWW-Authenticate: Token - signature=123abc,repository=”foo/bar”,access=delete - - X-Docker-Endpoints: registry.docker.io [, - registry2.docker.io] \# list of endpoints where this - repo lives. + **Body**: + Empty -3. (Docker -\> Registry) DELETE /v1/repositories/foo/bar/ - : **Headers**: - : Authorization: Token - signature=123abc,repository=”foo/bar”,access=delete +2. (Index -> Docker) 202 Accepted -4. (Registry-\>Index) PUT /v1/repositories/foo/bar/auth - : **Headers**: - : Authorization: Token - signature=123abc,repository=”foo/bar”,access=delete + **Headers**: + - WWW-Authenticate: Token + signature=123abc,repository=”foo/bar”,access=delete + - X-Docker-Endpoints: registry.docker.io [, + registry2.docker.io] + # list of endpoints where this repo lives. - **Action**:: - : - Index: - : will invalidate the token. +3. (Docker -> Registry) DELETE /v1/repositories/foo/bar/ - - Registry: - : deletes the repository (if token is approved) + **Headers**: + Authorization: Token + signature=123abc,repository=”foo/bar”,access=delete -5. (Registry -\> Docker) 200 OK - : 200 If success 403 if forbidden 400 if bad request 404 if - repository isn’t found +4. (Registry->Index) PUT /v1/repositories/foo/bar/auth -6. (Docker -\> Index) DELETE /v1/repositories/foo/bar/ + **Headers**: + Authorization: Token + signature=123abc,repository=”foo/bar”,access=delete - > **Headers**: - > : Authorization: Basic 123oislifjsldfj== X-Docker-Endpoints: - > registry-1.docker.io (no validation on this right now) - > - > **Body**: - > : Empty - > - > **Return** HTTP 200 + **Action**: + - Index: + will invalidate the token. 
+ - Registry: + deletes the repository (if token is approved) + +5. (Registry -> Docker) 200 OK + + 200 If success 403 if forbidden 400 if bad request 404 + if repository isn't found + +6. (Docker -> Index) DELETE /v1/repositories/foo/bar/ + + **Headers**: + Authorization: Basic 123oislifjsldfj== X-Docker-Endpoints: + registry-1.docker.io (no validation on this right now) + + **Body**: + Empty + + **Return**: HTTP 200 ## How to use the Registry in standalone mode The Index has two main purposes (along with its fancy social features): -- Resolve short names (to avoid passing absolute URLs all the time) - : - username/projectname -\> - https://registry.docker.io/users/\/repositories/\/ - - team/projectname -\> - https://registry.docker.io/team/\/repositories/\/ + - Resolve short names (to avoid passing absolute URLs all the time): -- Authenticate a user as a repos owner (for a central referenced + username/projectname -> + https://registry.docker.io/users//repositories// + team/projectname -> + https://registry.docker.io/team//repositories// + + - Authenticate a user as a repos owner (for a central referenced repository) ### Without an Index @@ -429,17 +437,17 @@ no write access is necessary). The Index data needed by the Registry are simple: -- Serve the checksums -- Provide and authorize a Token + - Serve the checksums + - Provide and authorize a Token In the scenario of a Registry running on a private network with the need -of centralizing and authorizing, it’s easy to use a custom Index. +of centralizing and authorizing, it's easy to use a custom Index. The only challenge will be to tell Docker to contact (and trust) this custom Index. Docker will be configurable at some point to use a -specific Index, it’ll be the private entity responsibility (basically +specific Index, it'll be the private entity responsibility (basically the organization who uses Docker in a private environment) to maintain -the Index and the Docker’s configuration among its consumers. +the Index and the Docker's configuration among its consumers. ## The API @@ -453,7 +461,7 @@ JSON), basically because Registry stores exactly the same kind of information as Docker uses to manage them. The format of ancestry is a line-separated list of image ids, in age -order, i.e. the image’s parent is on the last line, the parent of the +order, i.e. the image's parent is on the last line, the parent of the parent on the next-to-last line, etc.; if the image has no parent, the file is empty. @@ -468,17 +476,18 @@ file is empty. ### Create a user (Index) -POST /v1/users + POST /v1/users: -**Body**: -: {"email": "[sam@dotcloud.com](mailto:sam%40dotcloud.com)", - "password": "toto42", "username": "foobar"’} -**Validation**: -: - **username**: min 4 character, max 30 characters, must match the - regular expression [a-z0-9\_]. + **Body**: + {"email": "[sam@dotcloud.com](mailto:sam%40dotcloud.com)", + "password": "toto42", "username": "foobar"`} + + **Validation**: + - **username**: min 4 character, max 30 characters, must match the + regular expression [a-z0-9_]. 
- **password**: min 5 characters -**Valid**: return HTTP 200 + **Valid**: return HTTP 200 Errors: HTTP 400 (we should create error codes for possible errors) - invalid json - missing field - wrong format (username, password, email, @@ -490,10 +499,10 @@ etc) - forbidden name - name already exists ### Update a user (Index) -PUT /v1/users/\ + PUT /v1/users/ -**Body**: -: {"password": "toto"} + **Body**: + {"password": "toto"} > **Note**: > We can also update email address, if they do, they will need to reverify @@ -506,44 +515,44 @@ validate credentials. HTTP Basic Auth for now, maybe change in future. GET /v1/users -**Return**: -: - Valid: HTTP 200 - - Invalid login: HTTP 401 - - Account inactive: HTTP 403 Account is not Active + **Return**: + - Valid: HTTP 200 + - Invalid login: HTTP 401 + - Account inactive: HTTP 403 Account is not Active ### Tags (Registry) The Registry does not know anything about users. Even though -repositories are under usernames, it’s just a namespace for the +repositories are under usernames, it's just a namespace for the registry. Allowing us to implement organizations or different namespaces -per user later, without modifying the Registry’s API. +per user later, without modifying the Registry'sAPI. The following naming restrictions apply: -- Namespaces must match the same regular expression as usernames (See + - Namespaces must match the same regular expression as usernames (See 4.2.1.) -- Repository names must match the regular expression [a-zA-Z0-9-\_.] + - Repository names must match the regular expression [a-zA-Z0-9-_.] ### Get all tags: -GET /v1/repositories/\/\/tags +GET /v1/repositories///tags -**Return**: HTTP 200 -: { "latest": + **Return**: HTTP 200 + { "latest": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", “0.1.1”: “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087” } -#### 4.3.2 Read the content of a tag (resolve the image id) + **4.3.2 Read the content of a tag (resolve the image id):** -GET /v1/repositories/\/\/tags/\ + GET /v1/repositories///tags/ -**Return**: -: "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f" + **Return**: + "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f" -#### 4.3.3 Delete a tag (registry) + **4.3.3 Delete a tag (registry):** -DELETE /v1/repositories/\/\/tags/\ + DELETE /v1/repositories///tags/ ### 4.4 Images (Index) @@ -552,12 +561,12 @@ it uses the X-Docker-Endpoints header. In other terms, this requests always add a `X-Docker-Endpoints` to indicate the location of the registry which hosts this repository. -#### 4.4.1 Get the images +**4.4.1 Get the images:** -GET /v1/repositories/\/\/images + GET /v1/repositories///images -**Return**: HTTP 200 -: [{“id”: + **Return**: HTTP 200 + [{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”, “checksum”: “[md5:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087](md5:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087)”}] @@ -566,22 +575,22 @@ GET /v1/repositories/\/\/images You always add images, you never remove them. 
-PUT /v1/repositories/\/\/images + PUT /v1/repositories///images -**Body**: -: [ {“id”: + **Body**: + [ {“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”, “checksum”: “sha256:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”} ] -**Return** 204 + **Return**: 204 ### Repositories ### Remove a Repository (Registry) -DELETE /v1/repositories/\/\ +DELETE /v1/repositories// Return 200 OK @@ -589,16 +598,16 @@ Return 200 OK This starts the delete process. see 2.3 for more details. -DELETE /v1/repositories/\/\ +DELETE /v1/repositories// Return 202 OK ## Chaining Registries -It’s possible to chain Registries server for several reasons: +It's possible to chain Registries server for several reasons: -- Load balancing -- Delegate the next request to another server + - Load balancing + - Delegate the next request to another server When a Registry is a reference for a repository, it should host the entire images chain in order to avoid breaking the chain during the @@ -631,32 +640,30 @@ You have 3 options: 1. Provide user credentials and ask for a token - > **Header**: - > : - Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== - > - X-Docker-Token: true - > - > In this case, along with the 200 response, you’ll get a new token - > (if user auth is ok): If authorization isn’t correct you get a 401 - > response. If account isn’t active you will get a 403 response. - > - > **Response**: - > : - 200 OK - > - X-Docker-Token: Token - > signature=123abc,repository=”foo/bar”,access=read - > + **Header**: + - Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== + - X-Docker-Token: true + + In this case, along with the 200 response, you'll get a new token + (if user auth is ok): If authorization isn't correct you get a 401 + response. If account isn't active you will get a 403 response. + + **Response**: + - 200 OK + - X-Docker-Token: Token + signature=123abc,repository=”foo/bar”,access=read + 2. Provide user credentials only - > **Header**: - > : Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== - > + **Header**: + Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== 3. Provide Token - > **Header**: - > : Authorization: Token - > signature=123abc,repository=”foo/bar”,access=read - > + **Header**: + Authorization: Token + signature=123abc,repository=”foo/bar”,access=read ### 6.2 On the Registry @@ -684,7 +691,7 @@ Next request: ## Document Version -- 1.0 : May 6th 2013 : initial release -- 1.1 : June 1st 2013 : Added Delete Repository and way to handle new + - 1.0 : May 6th 2013 : initial release + - 1.1 : June 1st 2013 : Added Delete Repository and way to handle new source namespace. diff --git a/docs/sources/reference/api/remote_api_client_libraries.md b/docs/sources/reference/api/remote_api_client_libraries.md index eb1e3a4ee1..4b90afc5b0 100644 --- a/docs/sources/reference/api/remote_api_client_libraries.md +++ b/docs/sources/reference/api/remote_api_client_libraries.md @@ -9,81 +9,124 @@ compatibility. Please file issues with the library owners. If you find more library implementations, please list them in Docker doc bugs and we will add the libraries here. 
- ------------------------------------------------------------------------- - Language/Framewor Name Repository Status - k - ----------------- ------------ ---------------------------------- ------- - Python docker-py [https://github.com/dotcloud/docke Active - r-py](https://github.com/dotcloud/ - docker-py) - - Ruby docker-clien [https://github.com/geku/docker-cl Outdate - t ient](https://github.com/geku/dock d - er-client) - - Ruby docker-api [https://github.com/swipely/docker Active - -api](https://github.com/swipely/d - ocker-api) - - JavaScript dockerode [https://github.com/apocas/dockero Active - (NodeJS) de](https://github.com/apocas/dock - erode) - Install via NPM: npm install - dockerode - - JavaScript docker.io [https://github.com/appersonlabs/d Active - (NodeJS) ocker.io](https://github.com/apper - sonlabs/docker.io) - Install via NPM: npm install - docker.io - - JavaScript docker-js [https://github.com/dgoujard/docke Outdate - r-js](https://github.com/dgoujard/ d - docker-js) - - JavaScript docker-cp [https://github.com/13W/docker-cp] Active - (Angular) (https://github.com/13W/docker-cp) - **WebUI** - - JavaScript dockerui [https://github.com/crosbymichael/ Active - (Angular) dockerui](https://github.com/crosb - **WebUI** ymichael/dockerui) - - Java docker-java [https://github.com/kpelykh/docker Active - -java](https://github.com/kpelykh/ - docker-java) - - Erlang erldocker [https://github.com/proger/erldock Active - er](https://github.com/proger/erld - ocker) - - Go go-dockercli [https://github.com/fsouza/go-dock Active - ent erclient](https://github.com/fsouz - a/go-dockerclient) - - Go dockerclient [https://github.com/samalba/docker Active - client](https://github.com/samalba - /dockerclient) - - PHP Alvine [http://pear.alvine.io/](http://pe Active - ar.alvine.io/) - (alpha) - - PHP Docker-PHP [http://stage1.github.io/docker-ph Active - p/](http://stage1.github.io/docker - -php/) - - Perl Net::Docker [https://metacpan.org/pod/Net::Doc Active - ker](https://metacpan.org/pod/Net: - :Docker) - - Perl Eixo::Docker [https://github.com/alambike/eixo- Active - docker](https://github.com/alambik - e/eixo-docker) - - Scala reactive-doc [https://github.com/almoehi/reacti Active - ker ve-docker](https://github.com/almo - ehi/reactive-docker) - ------------------------------------------------------------------------- - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Language/Framework | Name | Repository | Status |
|---|---|---|---|
| Python | docker-py | https://github.com/dotcloud/docker-py | Active |
| Ruby | docker-client | https://github.com/geku/docker-client | Outdated |
| Ruby | docker-api | https://github.com/swipely/docker-api | Active |
| JavaScript (NodeJS) | dockerode | https://github.com/apocas/dockerode Install via NPM: `npm install dockerode` | Active |
| JavaScript (NodeJS) | docker.io | https://github.com/appersonlabs/docker.io Install via NPM: `npm install docker.io` | Active |
| JavaScript | docker-js | https://github.com/dgoujard/docker-js | Outdated |
| JavaScript (Angular) **WebUI** | docker-cp | https://github.com/13W/docker-cp | Active |
| JavaScript (Angular) **WebUI** | dockerui | https://github.com/crosbymichael/dockerui | Active |
| Java | docker-java | https://github.com/kpelykh/docker-java | Active |
| Erlang | erldocker | https://github.com/proger/erldocker | Active |
| Go | go-dockerclient | https://github.com/fsouza/go-dockerclient | Active |
| Go | dockerclient | https://github.com/samalba/dockerclient | Active |
| PHP | Alvine | http://pear.alvine.io/ (alpha) | Active |
| PHP | Docker-PHP | http://stage1.github.io/docker-php/ | Active |
| Perl | Net::Docker | https://metacpan.org/pod/Net::Docker | Active |
| Perl | Eixo::Docker | https://github.com/alambike/eixo-docker | Active |
| Scala | reactive-docker | https://github.com/almoehi/reactive-docker | Active |
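
All of the libraries above wrap the same HTTP Remote API exposed by the Docker daemon, so a raw request is a quick way to sanity-check a setup before reaching for a client library. The session below is a minimal, hypothetical sketch: it assumes the daemon has been started with a TCP binding on `127.0.0.1:4243` (for example `docker -d -H tcp://127.0.0.1:4243`); adjust the host, port, and API version to match your installation.

    # Ask the daemon which version it is running
    $ curl http://127.0.0.1:4243/version

    # List running containers (the same data `docker ps` displays)
    $ curl http://127.0.0.1:4243/containers/json

    # List images, including intermediate layers (like `docker images -a`)
    $ curl "http://127.0.0.1:4243/images/json?all=1"

Each listed library essentially issues these same requests and decodes the JSON responses for you.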
diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 5c332e5c2f..c976c118d7 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -4,23 +4,21 @@ page_keywords: builder, docker, Dockerfile, automation, image creation # Dockerfile Reference -**Docker can act as a builder** and read instructions from a text -`Dockerfile` to automate the steps you would -otherwise take manually to create an image. Executing -`docker build` will run your steps and commit them -along the way, giving you a final image. +**Docker can act as a builder** and read instructions from a text *Dockerfile* +to automate the steps you would otherwise take manually to create an image. +Executing `docker build` will run your steps and commit them along the way, +giving you a final image. ## Usage -To [*build*](../commandline/cli/#cli-build) an image from a source -repository, create a description file called `Dockerfile` -at the root of your repository. This file will describe the -steps to assemble the image. +To [*build*](../commandline/cli/#cli-build) an image from a source repository, +create a description file called Dockerfile at the root of your repository. +This file will describe the steps to assemble the image. -Then call `docker build` with the path of your -source repository as argument (for example, `.`): +Then call `docker build` with the path of you source repository as argument +(for example, `.`): -> `sudo docker build .` + sudo docker build . The path to the source repository defines where to find the *context* of the build. The build is run by the Docker daemon, not by the CLI, so the @@ -30,7 +28,7 @@ whole context must be transferred to the daemon. The Docker CLI reports You can specify a repository and tag at which to save the new image if the build succeeds: -> `sudo docker build -t shykes/myapp .` + sudo docker build -t shykes/myapp . The Docker daemon will run your steps one-by-one, committing the result to a new image if necessary, before finally outputting the ID of your @@ -38,12 +36,11 @@ new image. The Docker daemon will automatically clean up the context you sent. Note that each instruction is run independently, and causes a new image -to be created - so `RUN cd /tmp` will not have any -effect on the next instructions. +to be created - so `RUN cd /tmp` will not have any effect on the next +instructions. Whenever possible, Docker will re-use the intermediate images, -accelerating `docker build` significantly (indicated -by `Using cache`): +accelerating `docker build` significantly (indicated by `Using cache`): $ docker build -t SvenDowideit/ambassador . Uploading context 10.24 kB @@ -58,9 +55,9 @@ by `Using cache`): ---> 1a5ffc17324d Successfully built 1a5ffc17324d -When you’re done with your build, you’re ready to look into [*Pushing a -repository to its -registry*](../../use/workingwithrepository/#image-push). +When you're done with your build, you're ready to look into +[*Pushing a repository to its registry*]( +../../use/workingwithrepository/#image-push). ## Format @@ -83,84 +80,73 @@ be treated as an argument. This allows statements like: # Comment RUN echo 'we are running some # of cool things' -Here is the set of instructions you can use in a `Dockerfile` +Here is the set of instructions you can use in a Dockerfile for building images. -## `FROM` +## FROM -> `FROM ` + FROM Or -> `FROM :` + FROM : -The `FROM` instruction sets the [*Base -Image*](../../terms/image/#base-image-def) for subsequent instructions. 
-As such, a valid Dockerfile must have `FROM` as its -first instruction. The image can be any valid image – it is especially -easy to start by **pulling an image** from the [*Public -Repositories*](../../use/workingwithrepository/#using-public-repositories). +The `FROM` instruction sets the [*Base Image*](../../terms/image/#base-image-def) +for subsequent instructions. As such, a valid Dockerfile must have `FROM` as +its first instruction. The image can be any valid image – it is especially easy +to start by **pulling an image** from the [*Public Repositories*]( +../../use/workingwithrepository/#using-public-repositories). -`FROM` must be the first non-comment instruction in -the `Dockerfile`. +`FROM` must be the first non-comment instruction in the Dockerfile. -`FROM` can appear multiple times within a single -Dockerfile in order to create multiple images. Simply make a note of the -last image id output by the commit before each new `FROM` -command. +`FROM` can appear multiple times within a single Dockerfile in order to create +multiple images. Simply make a note of the last image id output by the commit +before each new `FROM` command. -If no `tag` is given to the `FROM` -instruction, `latest` is assumed. If the +If no `tag` is given to the `FROM` instruction, `latest` is assumed. If the used tag does not exist, an error will be returned. -## `MAINTAINER` +## MAINTAINER -> `MAINTAINER ` + MAINTAINER -The `MAINTAINER` instruction allows you to set the -*Author* field of the generated images. +The `MAINTAINER` instruction allows you to set the *Author* field of the +generated images. -## `RUN` +## RUN RUN has 2 forms: -- `RUN ` (the command is run in a shell - - `/bin/sh -c`) -- `RUN ["executable", "param1", "param2"]` (*exec* - form) +- `RUN ` (the command is run in a shell - `/bin/sh -c`) +- `RUN ["executable", "param1", "param2"]` (*exec* form) -The `RUN` instruction will execute any commands in a -new layer on top of the current image and commit the results. The -resulting committed image will be used for the next step in the -Dockerfile. +The `RUN` instruction will execute any commands in a new layer on top of the +current image and commit the results. The resulting committed image will be +used for the next step in the Dockerfile. -Layering `RUN` instructions and generating commits -conforms to the core concepts of Docker where commits are cheap and -containers can be created from any point in an image’s history, much -like source control. +Layering `RUN` instructions and generating commits conforms to the core +concepts of Docker where commits are cheap and containers can be created from +any point in an image's history, much like source control. -The *exec* form makes it possible to avoid shell string munging, and to -`RUN` commands using a base image that does not -contain `/bin/sh`. +The *exec* form makes it possible to avoid shell string munging, and to `RUN` +commands using a base image that does not contain `/bin/sh`. ### Known Issues (RUN) -- [Issue 783](https://github.com/dotcloud/docker/issues/783) is about - file permissions problems that can occur when using the AUFS file - system. You might notice it during an attempt to `rm` - a file, for example. The issue describes a workaround. -- [Issue 2424](https://github.com/dotcloud/docker/issues/2424) Locale - will not be set automatically. +- [Issue 783](https://github.com/dotcloud/docker/issues/783) is about file + permissions problems that can occur when using the AUFS file system. 
You + might notice it during an attempt to `rm` a file, for example. The issue + describes a workaround. +- [Issue 2424](https://github.com/dotcloud/docker/issues/2424) Locale will + not be set automatically. -## `CMD` +## CMD CMD has three forms: -- `CMD ["executable","param1","param2"]` (like an - *exec*, preferred form) -- `CMD ["param1","param2"]` (as *default - parameters to ENTRYPOINT*) -- `CMD command param1 param2` (as a *shell*) +- `CMD ["executable","param1","param2"]` (like an *exec*, preferred form) +- `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*) +- `CMD command param1 param2` (as a *shell*) There can only be one CMD in a Dockerfile. If you list more than one CMD then only the last CMD will take effect. @@ -169,83 +155,75 @@ then only the last CMD will take effect. container.** These defaults can include an executable, or they can omit the executable, in which case you must specify an ENTRYPOINT as well. -When used in the shell or exec formats, the `CMD` -instruction sets the command to be executed when running the image. +When used in the shell or exec formats, the `CMD` instruction sets the command +to be executed when running the image. -If you use the *shell* form of the CMD, then the `` -will execute in `/bin/sh -c`: +If you use the *shell* form of the CMD, then the `` will execute in +`/bin/sh -c`: FROM ubuntu CMD echo "This is a test." | wc - -If you want to **run your** `` **without a -shell** then you must express the command as a JSON array and give the -full path to the executable. **This array form is the preferred format -of CMD.** Any additional parameters must be individually expressed as -strings in the array: +If you want to **run your** `` **without a shell** then you must +express the command as a JSON array and give the full path to the executable. +**This array form is the preferred format of CMD.** Any additional parameters +must be individually expressed as strings in the array: FROM ubuntu CMD ["/usr/bin/wc","--help"] -If you would like your container to run the same executable every time, -then you should consider using `ENTRYPOINT` in -combination with `CMD`. See +If you would like your container to run the same executable every time, then +you should consider using `ENTRYPOINT` in combination with `CMD`. See [*ENTRYPOINT*](#entrypoint). -If the user specifies arguments to `docker run` then -they will override the default specified in CMD. +If the user specifies arguments to `docker run` then they will override the +default specified in CMD. > **Note**: -> Don’t confuse `RUN` with `CMD`. `RUN` actually runs a command and commits +> don't confuse `RUN` with `CMD`. `RUN` actually runs a command and commits > the result; `CMD` does not execute anything at build time, but specifies > the intended command for the image. -## `EXPOSE` +## EXPOSE -> `EXPOSE [...]` + EXPOSE [...] -The `EXPOSE` instructions informs Docker that the -container will listen on the specified network ports at runtime. Docker -uses this information to interconnect containers using links (see +The `EXPOSE` instructions informs Docker that the container will listen on the +specified network ports at runtime. Docker uses this information to interconnect +containers using links (see [*links*](../../use/working_with_links_names/#working-with-links-names)), -and to setup port redirection on the host system (see [*Redirect -Ports*](../../use/port_redirection/#port-redirection)). 
+and to setup port redirection on the host system (see [*Redirect Ports*]( +../../use/port_redirection/#port-redirection)). -## `ENV` +## ENV -> `ENV ` + ENV -The `ENV` instruction sets the environment variable -`` to the value ``. -This value will be passed to all future `RUN` -instructions. This is functionally equivalent to prefixing the command -with `=` +The `ENV` instruction sets the environment variable `` to the value +``. This value will be passed to all future `RUN` instructions. This is +functionally equivalent to prefixing the command with `=` -The environment variables set using `ENV` will -persist when a container is run from the resulting image. You can view -the values using `docker inspect`, and change them -using `docker run --env =`. +The environment variables set using `ENV` will persist when a container is run +from the resulting image. You can view the values using `docker inspect`, and +change them using `docker run --env =`. > **Note**: > One example where this can cause unexpected consequenses, is setting -> `ENV DEBIAN_FRONTEND noninteractive`. Which will -> persist when the container is run interactively; for example: -> `docker run -t -i image bash` +> `ENV DEBIAN_FRONTEND noninteractive`. Which will persist when the container +> is run interactively; for example: `docker run -t -i image bash` -## `ADD` +## ADD -> `ADD ` + ADD -The `ADD` instruction will copy new files from -\ and add them to the container’s filesystem at path -``. +The `ADD` instruction will copy new files from `` and add them to the +container's filesystem at path ``. -`` must be the path to a file or directory -relative to the source directory being built (also called the *context* -of the build) or a remote file URL. +`` must be the path to a file or directory relative to the source directory +being built (also called the *context* of the build) or a remote file URL. -`` is the absolute path to which the source -will be copied inside the destination container. +`` is the absolute path to which the source will be copied inside the +destination container. All new files and directories are created with mode 0755, uid and gid 0. @@ -262,79 +240,64 @@ All new files and directories are created with mode 0755, uid and gid 0. The copy obeys the following rules: -- The `` path must be inside the *context* of - the build; you cannot `ADD ../something /something` -, because the first step of a `docker build` - is to send the context directory (and subdirectories) to - the docker daemon. +- The `` path must be inside the *context* of the build; + you cannot `ADD ../something /something`, because the first step of a + `docker build` is to send the context directory (and subdirectories) to the + docker daemon. -- If `` is a URL and `` - does not end with a trailing slash, then a file is - downloaded from the URL and copied to ``. +- If `` is a URL and `` does not end with a trailing slash, then a + file is downloaded from the URL and copied to ``. -- If `` is a URL and `` - does end with a trailing slash, then the filename is - inferred from the URL and the file is downloaded to - `/`. For instance, - `ADD http://example.com/foobar /` would create - the file `/foobar`. The URL must have a - nontrivial path so that an appropriate filename can be discovered in - this case (`http://example.com` will not work). +- If `` is a URL and `` does end with a trailing slash, then the + filename is inferred from the URL and the file is downloaded to + `/`. 
For instance, `ADD http://example.com/foobar /` would + create the file `/foobar`. The URL must have a nontrivial path so that an + appropriate filename can be discovered in this case (`http://example.com` + will not work). -- If `` is a directory, the entire directory - is copied, including filesystem metadata. +- If `` is a directory, the entire directory is copied, including + filesystem metadata. -- If `` is a *local* tar archive in a - recognized compression format (identity, gzip, bzip2 or xz) then it - is unpacked as a directory. Resources from *remote* URLs are **not** - decompressed. +- If `` is a *local* tar archive in a recognized compression format + (identity, gzip, bzip2 or xz) then it is unpacked as a directory. Resources + from *remote* URLs are **not** decompressed. When a directory is copied or + unpacked, it has the same behavior as `tar -x`: the result is the union of: - When a directory is copied or unpacked, it has the same behavior as - `tar -x`: the result is the union of + 1. whatever existed at the destination path and + 2. the contents of the source tree, with conflicts resolved in favor of + "2." on a file-by-file basis. - 1. whatever existed at the destination path and - 2. the contents of the source tree, +- If `` is any other kind of file, it is copied individually along with + its metadata. In this case, if `` ends with a trailing slash `/`, it + will be considered a directory and the contents of `` will be written + at `/base()`. - with conflicts resolved in favor of "2." on a file-by-file basis. +- If `` does not end with a trailing slash, it will be considered a + regular file and the contents of `` will be written at ``. -- If `` is any other kind of file, it is - copied individually along with its metadata. In this case, if - `` ends with a trailing slash - `/`, it will be considered a directory and the - contents of `` will be written at - `/base()`. +- If `` doesn't exist, it is created along with all missing directories + in its path. -- If `` does not end with a trailing slash, - it will be considered a regular file and the contents of - `` will be written at `` -. - -- If `` doesn’t exist, it is created along - with all missing directories in its path. - -## `ENTRYPOINT` +## ENTRYPOINT ENTRYPOINT has two forms: -- `ENTRYPOINT ["executable", "param1", "param2"]` - (like an *exec*, preferred form) -- `ENTRYPOINT command param1 param2` (as a - *shell*) +- `ENTRYPOINT ["executable", "param1", "param2"]` + (like an *exec*, preferred form) +- `ENTRYPOINT command param1 param2` + (as a *shell*) -There can only be one `ENTRYPOINT` in a Dockerfile. -If you have more than one `ENTRYPOINT`, then only -the last one in the Dockerfile will have an effect. +There can only be one `ENTRYPOINT` in a Dockerfile. If you have more than one +`ENTRYPOINT`, then only the last one in the Dockerfile will have an effect. -An `ENTRYPOINT` helps you to configure a container -that you can run as an executable. That is, when you specify an -`ENTRYPOINT`, then the whole container runs as if it -was just that executable. +An `ENTRYPOINT` helps you to configure a container that you can run as an +executable. That is, when you specify an `ENTRYPOINT`, then the whole container +runs as if it was just that executable. The `ENTRYPOINT` instruction adds an entry command that will **not** be -overwritten when arguments are passed to `docker run`, unlike the -behavior of `CMD`. This allows arguments to be passed to the entrypoint. -i.e. 
`docker run -d` will pass the "-d" argument to the -ENTRYPOINT. +overwritten when arguments are passed to `docker run`, unlike the behavior +of `CMD`. This allows arguments to be passed to the entrypoint. i.e. +`docker run -d` will pass the "-d" argument to the ENTRYPOINT. You can specify parameters either in the ENTRYPOINT JSON array (as in "like an exec" above), or by using a CMD statement. Parameters in the @@ -342,13 +305,13 @@ ENTRYPOINT will not be overridden by the `docker run` arguments, but parameters specified via CMD will be overridden by `docker run` arguments. -Like a `CMD`, you can specify a plain string for the -ENTRYPOINT and it will execute in `/bin/sh -c`: +Like a `CMD`, you can specify a plain string for the `ENTRYPOINT` and it will +execute in `/bin/sh -c`: FROM ubuntu ENTRYPOINT wc -l - -For example, that Dockerfile’s image will *always* take stdin as input +For example, that Dockerfile's image will *always* take stdin as input ("-") and print the number of lines ("-l"). If you wanted to make this optional but default, you could use a CMD: @@ -356,44 +319,41 @@ optional but default, you could use a CMD: CMD ["-l", "-"] ENTRYPOINT ["/usr/bin/wc"] -## `VOLUME` +## VOLUME -> `VOLUME ["/data"]` + VOLUME ["/data"] -The `VOLUME` instruction will create a mount point -with the specified name and mark it as holding externally mounted -volumes from native host or other containers. For more -information/examples and mounting instructions via docker client, refer -to [*Share Directories via -Volumes*](../../use/working_with_volumes/#volume-def) documentation. +The `VOLUME` instruction will create a mount point with the specified name +and mark it as holding externally mounted volumes from native host or other +containers. For more information/examples and mounting instructions via docker +client, refer to [*Share Directories via Volumes*]( +../../use/working_with_volumes/#volume-def) documentation. -## `USER` +## USER -> `USER daemon` + USER daemon -The `USER` instruction sets the username or UID to -use when running the image. +The `USER` instruction sets the username or UID to use when running the image. -## `WORKDIR` +## WORKDIR -> `WORKDIR /path/to/workdir` + WORKDIR /path/to/workdir -The `WORKDIR` instruction sets the working directory -for the `RUN`, `CMD` and +The `WORKDIR` instruction sets the working directory for the `RUN`, `CMD` and `ENTRYPOINT` Dockerfile commands that follow it. It can be used multiple times in the one Dockerfile. If a relative path -is provided, it will be relative to the path of the previous -`WORKDIR` instruction. For example: +is provided, it will be relative to the path of the previous `WORKDIR` +instruction. For example: -> WORKDIR /a WORKDIR b WORKDIR c RUN pwd + WORKDIR /a WORKDIR b WORKDIR c RUN pwd The output of the final `pwd` command in this Dockerfile would be `/a/b/c`. -## `ONBUILD` +## ONBUILD -> `ONBUILD [INSTRUCTION]` + ONBUILD [INSTRUCTION] The `ONBUILD` instruction adds to the image a "trigger" instruction to be executed at a later time, when the image is @@ -410,7 +370,7 @@ daemon which may be customized with user-specific configuration. For example, if your image is a reusable python application builder, it will require application source code to be added in a particular directory, and it might require a build script to be called *after* -that. You can’t just call *ADD* and *RUN* now, because you don’t yet +that. 
You can't just call *ADD* and *RUN* now, because you don't yet have access to the application source code, and it will be different for each application build. You could simply provide application developers with a boilerplate Dockerfile to copy-paste into their application, but @@ -420,23 +380,23 @@ mixes with application-specific code. The solution is to use *ONBUILD* to register in advance instructions to run later, during the next build stage. -Here’s how it works: +Here's how it works: -1. When it encounters an *ONBUILD* instruction, the builder adds a - trigger to the metadata of the image being built. The instruction - does not otherwise affect the current build. -2. At the end of the build, a list of all triggers is stored in the - image manifest, under the key *OnBuild*. They can be inspected with - *docker inspect*. -3. Later the image may be used as a base for a new build, using the - *FROM* instruction. As part of processing the *FROM* instruction, - the downstream builder looks for *ONBUILD* triggers, and executes - them in the same order they were registered. If any of the triggers - fail, the *FROM* instruction is aborted which in turn causes the - build to fail. If all triggers succeed, the FROM instruction - completes and the build continues as usual. -4. Triggers are cleared from the final image after being executed. In - other words they are not inherited by "grand-children" builds. +1. When it encounters an *ONBUILD* instruction, the builder adds a + trigger to the metadata of the image being built. The instruction + does not otherwise affect the current build. +2. At the end of the build, a list of all triggers is stored in the + image manifest, under the key *OnBuild*. They can be inspected with + *docker inspect*. +3. Later the image may be used as a base for a new build, using the + *FROM* instruction. As part of processing the *FROM* instruction, + the downstream builder looks for *ONBUILD* triggers, and executes + them in the same order they were registered. If any of the triggers + fail, the *FROM* instruction is aborted which in turn causes the + build to fail. If all triggers succeed, the FROM instruction + completes and the build continues as usual. +4. Triggers are cleared from the final image after being executed. In + other words they are not inherited by "grand-children" builds. For example you might add something like this: @@ -445,7 +405,7 @@ For example you might add something like this: ONBUILD RUN /usr/local/bin/python-build --dir /app/src [...] -> **Warning**: Chaining ONBUILD instructions using ONBUILD ONBUILD isn’t allowed. +> **Warning**: Chaining ONBUILD instructions using ONBUILD ONBUILD isn't allowed. > **Warning**: ONBUILD may not trigger FROM or MAINTAINER instructions. 
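
To make the trigger flow concrete, here is a hypothetical two-step build session based on the snippet above. The image and directory names (`my/python-builder`, `my/python-app`, `app/`) are placeholders; the point is only that the `ONBUILD` instructions recorded by the first build run automatically at the start of the second one.

    # Build the reusable builder image; the ONBUILD lines are only
    # recorded, not executed, during this build.
    $ sudo docker build -t my/python-builder .

    # In an application directory whose Dockerfile starts with
    # "FROM my/python-builder", the recorded triggers (the ADD and RUN
    # above) execute first, then the rest of that Dockerfile.
    $ cd app/
    $ sudo docker build -t my/python-app .

    # The stored triggers can be reviewed with docker inspect
    # (look for the "OnBuild" key in the output):
    $ sudo docker inspect my/python-builder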
diff --git a/docs/sources/reference/commandline.md b/docs/sources/reference/commandline.md index 8620a095b9..b15f529394 100644 --- a/docs/sources/reference/commandline.md +++ b/docs/sources/reference/commandline.md @@ -3,5 +3,5 @@ ## Contents: -- [Command Line](cli/) +- [Command Line](cli/) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index e0d896755b..d59bd37674 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -4,8 +4,8 @@ page_keywords: Docker, Docker documentation, CLI, command line # Command Line -To list available commands, either run `docker` with -no parameters or execute `docker help`: +To list available commands, either run `docker` with no parameters +or execute `docker help`: $ sudo docker Usage: docker [OPTIONS] COMMAND [arg...] @@ -33,13 +33,11 @@ will set the value to the opposite of the default value. ### Multi -Options like `-a=[]` indicate they can be specified -multiple times: +Options like `-a=[]` indicate they can be specified multiple times: docker run -a stdin -a stdout -a stderr -i -t ubuntu /bin/bash -Sometimes this can use a more complex value string, as for -`-v`: +Sometimes this can use a more complex value string, as for `-v`: docker run -v /host:/container example/mysql @@ -49,9 +47,10 @@ Options like `--name=""` expect a string, and they can only be specified once. Options like `-c=0` expect an integer, and they can only be specified once. -## `daemon` +## daemon Usage of docker: + -D, --debug=false: Enable debug mode -H, --host=[]: Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise. systemd socket activation can be used with fd://[socketfd]. -G, --group="docker": Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group @@ -95,9 +94,8 @@ To run the daemon with debug output, use `docker -d -D`. To use lxc as the execution driver, use `docker -d -e lxc`. -The docker client will also honor the `DOCKER_HOST` -environment variable to set the `-H` flag for the -client. +The docker client will also honor the `DOCKER_HOST` environment variable to set +the `-H` flag for the client. docker -H tcp://0.0.0.0:4243 ps # or @@ -105,32 +103,32 @@ client. docker ps # both are equal -To run the daemon with [systemd socket -activation](http://0pointer.de/blog/projects/socket-activation.html), -use `docker -d -H fd://`. Using `fd://` -will work perfectly for most setups but you can also specify -individual sockets too `docker -d -H fd://3`. If the -specified socket activated files aren’t found then docker will exit. You +To run the daemon with [systemd socket activation]( +http://0pointer.de/blog/projects/socket-activation.html), use +`docker -d -H fd://`. Using `fd://` will work perfectly for most setups but +you can also specify individual sockets too `docker -d -H fd://3`. If the +specified socket activated files aren't found then docker will exit. You can find examples of using systemd socket activation with docker and -systemd in the [docker source -tree](https://github.com/dotcloud/docker/blob/master/contrib/init/systemd/socket-activation/). +systemd in the [docker source tree]( +https://github.com/dotcloud/docker/blob/master/contrib/init/systemd/socket-activation/). Docker supports softlinks for the Docker data directory -(`/var/lib/docker`) and for `/tmp`. 
TMPDIR and the data directory can be set like this: +(`/var/lib/docker`) and for `/tmp`. TMPDIR and the data directory can be set +like this: TMPDIR=/mnt/disk2/tmp /usr/local/bin/docker -d -D -g /var/lib/docker -H unix:// > /var/lib/boot2docker/docker.log 2>&1 # or export TMPDIR=/mnt/disk2/tmp /usr/local/bin/docker -d -D -g /var/lib/docker -H unix:// > /var/lib/boot2docker/docker.log 2>&1 -## `attach` +## attach + +Attach to a running container. Usage: docker attach CONTAINER - Attach to a running container. - - --no-stdin=false: Do not attach stdin - --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) + --no-stdin=false: Do not attach stdin + --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) The `attach` command will allow you to view or interact with any running container, detached (`-d`) @@ -141,7 +139,7 @@ progress of your daemonized process. You can detach from the container again (and leave it running) with `CTRL-C` (for a quiet exit) or `CTRL-\` to get a stacktrace of the Docker client when it quits. When -you detach from the container’s process the exit code will be returned +you detach from the container's process the exit code will be returned to the client. To stop a container, use `docker stop`. @@ -182,34 +180,34 @@ To kill the container, use `docker kill`. ^C$ $ sudo docker stop $ID -## `build` +## build + +Build a new container image from the source code at PATH Usage: docker build [OPTIONS] PATH | URL | - - Build a new container image from the source code at PATH - -t, --tag="": Repository name (and optionally a tag) to be applied - to the resulting image in case of success. - -q, --quiet=false: Suppress the verbose output generated by the containers. - --no-cache: Do not use the cache when building the image. - --rm=true: Remove intermediate containers after a successful build -Use this command to build Docker images from a `Dockerfile` + -t, --tag="": Repository name (and optionally a tag) to be applied + to the resulting image in case of success. + -q, --quiet=false: Suppress the verbose output generated by the containers. + --no-cache: Do not use the cache when building the image. + --rm=true: Remove intermediate containers after a successful build + +Use this command to build Docker images from a Dockerfile and a "context". -The files at `PATH` or `URL` are -called the "context" of the build. The build process may refer to any of -the files in the context, for example when using an -[*ADD*](../../builder/#dockerfile-add) instruction. When a single -`Dockerfile` is given as `URL`, -then no context is set. +The files at `PATH` or `URL` are called the "context" of the build. The build +process may refer to any of the files in the context, for example when using an +[*ADD*](../../builder/#dockerfile-add) instruction. When a single Dockerfile is +given as `URL`, then no context is set. When a Git repository is set as `URL`, then the repository is used as the context. The Git repository is cloned with its submodules (git clone –recursive). A fresh git clone occurs in a temporary directory on your local host, and then this is sent to the Docker daemon as the context. This way, your local user credentials and -vpn’s etc can be used to access private repositories +vpn's etc can be used to access private repositories -See also +See also: [*Dockerfile Reference*](../../builder/#dockerbuilder). 
@@ -243,14 +241,14 @@ See also This example specifies that the `PATH` is `.`, and so all the files in the local directory get -tar’d and sent to the Docker daemon. The `PATH` +tar`d and sent to the Docker daemon. The `PATH` specifies where to find the files for the "context" of the build on the Docker daemon. Remember that the daemon could be running on a remote -machine and that no parsing of the `Dockerfile` -happens at the client side (where you’re running +machine and that no parsing of the Dockerfile +happens at the client side (where you're running `docker build`). That means that *all* the files at `PATH` get sent, not just the ones listed to -[*ADD*](../../builder/#dockerfile-add) in the `Dockerfile`. +[*ADD*](../../builder/#dockerfile-add) in the Dockerfile. The transfer of context from the local machine to the Docker daemon is what the `docker` client means when you see the @@ -268,30 +266,30 @@ and the tag will be `2.0` $ sudo docker build - < Dockerfile -This will read a `Dockerfile` from *stdin* without +This will read a Dockerfile from *stdin* without context. Due to the lack of a context, no contents of any local directory will be sent to the `docker` daemon. Since -there is no context, a `Dockerfile` `ADD` +there is no context, a Dockerfile `ADD` only works if it refers to a remote URL. $ sudo docker build github.com/creack/docker-firefox This will clone the GitHub repository and use the cloned repository as -context. The `Dockerfile` at the root of the -repository is used as `Dockerfile`. Note that you +context. The Dockerfile at the root of the +repository is used as Dockerfile. Note that you can specify an arbitrary Git repository by using the `git://` schema. -## `commit` +## commit + +Create a new image from a container᾿s changes Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] - Create a new image from a container᾿s changes + -m, --message="": Commit message + -a, --author="": Author (eg. "John Hannibal Smith " - -m, --message="": Commit message - -a, --author="": Author (eg. "John Hannibal Smith " - -It can be useful to commit a container’s file changes or settings into a +It can be useful to commit a container's file changes or settings into a new image. This allows you debug a container by running an interactive shell, or to export a working dataset to another server. Generally, it is better to use Dockerfiles to manage your images in a documented and @@ -309,27 +307,27 @@ maintainable way. REPOSITORY TAG ID CREATED VIRTUAL SIZE SvenDowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB -## `cp` +## cp + +Copy files/folders from the containers filesystem to the host +path. Paths are relative to the root of the filesystem. Usage: docker cp CONTAINER:PATH HOSTPATH - Copy files/folders from the containers filesystem to the host - path. Paths are relative to the root of the filesystem. - $ sudo docker cp 7bb0e258aefe:/etc/debian_version . $ sudo docker cp blue_frog:/etc/hosts . -## `diff` +## diff + +List the changed files and directories in a container᾿s filesystem Usage: docker diff CONTAINER - List the changed files and directories in a container᾿s filesystem +There are 3 events that are listed in the `diff`: -There are 3 events that are listed in the ‘diff’: - -1. `` `A` `` - Add -2. `` `D` `` - Delete -3. `` `C` `` - Change +1. `A` - Add +2. `D` - Delete +3. `C` - Change For example: @@ -347,12 +345,12 @@ For example: A /go/src/github.com/dotcloud/docker/.git .... 
-## `events` +## events + +Get real time events from the server Usage: docker events - Get real time events from the server - --since="": Show all events created since timestamp (either seconds since epoch, or date string as below) --until="": Show events created before timestamp @@ -360,24 +358,24 @@ For example: ### Examples -You’ll need two shells for this example. +You'll need two shells for this example. -#### Shell 1: Listening for events +**Shell 1: Listening for events:** $ sudo docker events -#### Shell 2: Start and Stop a Container +**Shell 2: Start and Stop a Container:** $ sudo docker start 4386fb97867d $ sudo docker stop 4386fb97867d -#### Shell 1: (Again .. now showing events) +**Shell 1: (Again .. now showing events):** [2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop -#### Show events in the past from a specified time +**Show events in the past from a specified time:** $ sudo docker events --since 1378216169 [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die @@ -392,24 +390,24 @@ You’ll need two shells for this example. [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop -## `export` +## export + +Export the contents of a filesystem as a tar archive to STDOUT Usage: docker export CONTAINER - Export the contents of a filesystem as a tar archive to STDOUT - For example: $ sudo docker export red_panda > latest.tar -## `history` +## history + +Show the history of an image Usage: docker history [OPTIONS] IMAGE - Show the history of an image - - --no-trunc=false: Don᾿t truncate output - -q, --quiet=false: Only show numeric IDs + --no-trunc=false: Don᾿t truncate output + -q, --quiet=false: Only show numeric IDs To see how the `docker:latest` image was built: @@ -422,15 +420,15 @@ To see how the `docker:latest` image was built: 750d58736b4b6cc0f9a9abe8f258cef269e3e9dceced1146503522be9f985ada 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -t jessie.tar.xz jessie http://http.debian.net/debian 0 B 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 9 months ago 0 B -## `images` +## images + +List images Usage: docker images [OPTIONS] [NAME] - List images - - -a, --all=false: Show all images (by default filter out the intermediate image layers) - --no-trunc=false: Don᾿t truncate output - -q, --quiet=false: Only show numeric IDs + -a, --all=false: Show all images (by default filter out the intermediate image layers) + --no-trunc=false: Don᾿t truncate output + -q, --quiet=false: Only show numeric IDs The default `docker images` will show all top level images, their repository and tags, and their virtual size. @@ -468,7 +466,7 @@ by default. tryout latest 2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB 5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB -## `import` +## import Usage: docker import URL|- [REPOSITORY[:TAG]] @@ -483,19 +481,19 @@ data from *stdin*. ### Examples -#### Import from a remote location +**Import from a remote location:** This will create a new untagged image. $ sudo docker import http://example.com/exampleimage.tgz -#### Import from a local file +**Import from a local file:** Import to docker via pipe and *stdin*. 
$ cat exampleimage.tgz | sudo docker import - exampleimagelocal:new -#### Import from a local directory +**Import from a local directory:** $ sudo tar -c . | docker import - exampleimagedir @@ -504,12 +502,12 @@ the ownership of the files (especially root ownership) during the archiving with tar. If you are not root (or the sudo command) when you tar, then the ownerships might not get preserved. -## `info` +## info + +Display system-wide information. Usage: docker info - Display system-wide information. - $ sudo docker info Containers: 292 Images: 194 @@ -522,44 +520,43 @@ tar, then the ownerships might not get preserved. Kernel Version: 3.8.0-33-generic WARNING: No swap limit support -When sending issue reports, please use `docker version` -and `docker info` to ensure we know how -your setup is configured. +When sending issue reports, please use `docker version` and `docker info` to +ensure we know how your setup is configured. -## `inspect` +## inspect + +Return low-level information on a container/image Usage: docker inspect CONTAINER|IMAGE [CONTAINER|IMAGE...] - Return low-level information on a container/image - - -f, --format="": Format the output using the given go template. + -f, --format="": Format the output using the given go template. By default, this will render all results in a JSON array. If a format is specified, the given template will be executed for each result. -Go’s [text/template](http://golang.org/pkg/text/template/) package +Go's[text/template](http://golang.org/pkg/text/template/) package describes all the details of the format. ### Examples -#### Get an instance’s IP Address +**Get an instance'sIP Address:** For the most part, you can pick out any field from the JSON in a fairly straightforward manner. $ sudo docker inspect --format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID -#### List All Port Bindings +**List All Port Bindings:** One can loop over arrays and maps in the results to produce simple text output: $ sudo docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID -#### Find a Specific Port Mapping +**Find a Specific Port Mapping:** -The `.Field` syntax doesn’t work when the field name -begins with a number, but the template language’s `index` +The `.Field` syntax doesn't work when the field name +begins with a number, but the template language's `index` function does. The `.NetworkSettings.Ports` section contains a map of the internal port mappings to a list of external address/port objects, so to grab just the numeric public @@ -570,43 +567,43 @@ the public address. $ sudo docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID -#### Get config +**Get config:** -The `.Field` syntax doesn’t work when the field -contains JSON data, but the template language’s custom `json` +The `.Field` syntax doesn't work when the field +contains JSON data, but the template language's custom `json` function does. The `.config` section contains complex json object, so to grab it as JSON, you use `json` to convert config object into JSON $ sudo docker inspect --format='{{json .config}}' $INSTANCE_ID -## `kill` +## kill + +Kill a running container (send SIGKILL, or specified signal) Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] 
- Kill a running container (send SIGKILL, or specified signal) - - -s, --signal="KILL": Signal to send to the container + -s, --signal="KILL": Signal to send to the container The main process inside the container will be sent SIGKILL, or any signal specified with option `--signal`. ### Known Issues (kill) -- [Issue 197](https://github.com/dotcloud/docker/issues/197) indicates - that `docker kill` may leave directories behind - and make it difficult to remove the container. -- [Issue 3844](https://github.com/dotcloud/docker/issues/3844) lxc - 1.0.0 beta3 removed `lcx-kill` which is used by - Docker versions before 0.8.0; see the issue for a workaround. +- [Issue 197](https://github.com/dotcloud/docker/issues/197) indicates + that `docker kill` may leave directories behind + and make it difficult to remove the container. +- [Issue 3844](https://github.com/dotcloud/docker/issues/3844) lxc + 1.0.0 beta3 removed `lcx-kill` which is used by + Docker versions before 0.8.0; see the issue for a workaround. -## `load` +## load + +Load an image from a tar archive on STDIN Usage: docker load - Load an image from a tar archive on STDIN - - -i, --input="": Read from a tar archive file, instead of STDIN + -i, --input="": Read from a tar archive file, instead of STDIN Loads a tarred repository from a file or the standard input stream. Restores both images and tags. @@ -626,28 +623,28 @@ Restores both images and tags. fedora heisenbug 58394af37342 7 weeks ago 385.5 MB fedora latest 58394af37342 7 weeks ago 385.5 MB -## `login` +## login + +Register or Login to the docker registry server Usage: docker login [OPTIONS] [SERVER] - Register or Login to the docker registry server - -e, --email="": Email -p, --password="": Password -u, --username="": Username - If you want to login to a private registry you can - specify this by adding the server name. +If you want to login to a private registry you can +specify this by adding the server name. example: docker login localhost:8080 -## `logs` +## logs + +Fetch the logs of a container Usage: docker logs [OPTIONS] CONTAINER - Fetch the logs of a container - -f, --follow=false: Follow log output The `docker logs` command batch-retrieves all logs @@ -655,28 +652,28 @@ present at the time of execution. The `docker logs --follow` command combines `docker logs` and `docker attach`: it will first return all logs from the beginning and then -continue streaming new output from the container’s stdout and stderr. +continue streaming new output from the container'sstdout and stderr. -## `port` +## port Usage: docker port [OPTIONS] CONTAINER PRIVATE_PORT - Lookup the public-facing port which is NAT-ed to PRIVATE_PORT +Lookup the public-facing port which is NAT-ed to PRIVATE_PORT -## `ps` +## ps + +List containers Usage: docker ps [OPTIONS] - List containers - - -a, --all=false: Show all containers. Only running containers are shown by default. - --before="": Show only container created before Id or Name, include non-running ones. - -l, --latest=false: Show only the latest created container, include non-running ones. - -n=-1: Show n last created containers, include non-running ones. - --no-trunc=false: Don᾿t truncate output - -q, --quiet=false: Only display numeric IDs - -s, --size=false: Display sizes, not to be used with -q - --since="": Show only containers created since Id or Name, include non-running ones. + -a, --all=false: Show all containers. Only running containers are shown by default. 
+ --before="": Show only container created before Id or Name, include non-running ones. + -l, --latest=false: Show only the latest created container, include non-running ones. + -n=-1: Show n last created containers, include non-running ones. + --no-trunc=false: Don᾿t truncate output + -q, --quiet=false: Only display numeric IDs + -s, --size=false: Display sizes, not to be used with -q + --since="": Show only containers created since Id or Name, include non-running ones. Running `docker ps` showing 2 linked containers. @@ -685,21 +682,20 @@ Running `docker ps` showing 2 linked containers. 4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db -`docker ps` will show only running containers by -default. To see all containers: `docker ps -a` +`docker ps` will show only running containers by default. To see all containers: +`docker ps -a` -## `pull` +## pull + +Pull an image or a repository from the registry Usage: docker pull NAME[:TAG] - Pull an image or a repository from the registry - Most of your images will be created on top of a base image from the -\([https://index.docker.io](https://index.docker.io)). +Docker Index ([https://index.docker.io](https://index.docker.io)). The Docker Index contains many pre-built images that you can -`pull` and try without needing to define and -configure your own. +`pull` and try without needing to define and configure your own. To download a particular image, or set of images (i.e., a repository), use `docker pull`: @@ -711,31 +707,32 @@ use `docker pull`: # it is based on. (typically the empty `scratch` image, a MAINTAINERs layer, # and the un-tared base. -## `push` +## push + +Push an image or a repository to the registry Usage: docker push NAME[:TAG] - Push an image or a repository to the registry - Use `docker push` to share your images on public or private registries. -## `restart` +## restart + +Restart a running container Usage: docker restart [OPTIONS] NAME - Restart a running container + -t, --time=10: Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10 - -t, --time=10: Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10 +## rm -## `rm` +Remove one or more containers Usage: docker rm [OPTIONS] CONTAINER - Remove one or more containers - -l, --link="": Remove the link instead of the actual container - -f, --force=false: Force removal of running container - -v, --volumes=false: Remove the volumes associated to the container + -l, --link="": Remove the link instead of the actual container + -f, --force=false: Force removal of running container + -v, --volumes=false: Remove the volumes associated to the container ### Known Issues (rm) @@ -765,18 +762,18 @@ This command will delete all stopped containers. The command IDs and pass them to the `rm` command which will delete them. Any running containers will not be deleted. -## `rmi` +## rmi + +Remove one or more images Usage: docker rmi IMAGE [IMAGE...] - Remove one or more images - - -f, --force=false: Force - --no-prune=false: Do not delete untagged parents + -f, --force=false: Force + --no-prune=false: Do not delete untagged parents ### Removing tagged images -Images can be removed either by their short or long ID’s, or their image +Images can be removed either by their short or long ID`s, or their image names. 
If an image has more than one name, each of them needs to be removed before the image is removed. @@ -802,86 +799,79 @@ removed before the image is removed. Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 -## `run` +## run + +Run a command in a new container Usage: docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] - Run a command in a new container + -a, --attach=map[]: Attach to stdin, stdout or stderr + -c, --cpu-shares=0: CPU shares (relative weight) + --cidfile="": Write the container ID to the file + -d, --detach=false: Detached mode: Run container in the background, print new container id + -e, --env=[]: Set environment variables + --env-file="": Read in a line delimited file of ENV variables + -h, --hostname="": Container host name + -i, --interactive=false: Keep stdin open even if not attached + --privileged=false: Give extended privileges to this container + -m, --memory="": Memory limit (format: , where unit = b, k, m or g) + -n, --networking=true: Enable networking for this container + -p, --publish=[]: Map a network port to the container + --rm=false: Automatically remove the container when it exits (incompatible with -d) + -t, --tty=false: Allocate a pseudo-tty + -u, --user="": Username or UID + --dns=[]: Set custom dns servers for the container + --dns-search=[]: Set custom DNS search domains for the container + -v, --volume=[]: Create a bind mount to a directory or file with: [host-path]:[container-path]:[rw|ro]. If a directory "container-path" is missing, then docker creates a new volume. + --volumes-from="": Mount all volumes from the given container(s) + --entrypoint="": Overwrite the default entrypoint set by the image + -w, --workdir="": Working directory inside the container + --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" + --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) + --expose=[]: Expose a port from the container without publishing it to your host + --link="": Add link to another container (name:alias) + --name="": Assign the specified name to the container. If no name is specific docker will generate a random name + -P, --publish-all=false: Publish all exposed ports to the host interfaces - -a, --attach=map[]: Attach to stdin, stdout or stderr - -c, --cpu-shares=0: CPU shares (relative weight) - --cidfile="": Write the container ID to the file - -d, --detach=false: Detached mode: Run container in the background, print new container id - -e, --env=[]: Set environment variables - --env-file="": Read in a line delimited file of ENV variables - -h, --hostname="": Container host name - -i, --interactive=false: Keep stdin open even if not attached - --privileged=false: Give extended privileges to this container - -m, --memory="": Memory limit (format: , where unit = b, k, m or g) - -n, --networking=true: Enable networking for this container - -p, --publish=[]: Map a network port to the container - --rm=false: Automatically remove the container when it exits (incompatible with -d) - -t, --tty=false: Allocate a pseudo-tty - -u, --user="": Username or UID - --dns=[]: Set custom dns servers for the container - --dns-search=[]: Set custom DNS search domains for the container - -v, --volume=[]: Create a bind mount to a directory or file with: [host-path]:[container-path]:[rw|ro]. If a directory "container-path" is missing, then docker creates a new volume. 
- --volumes-from="": Mount all volumes from the given container(s) - --entrypoint="": Overwrite the default entrypoint set by the image - -w, --workdir="": Working directory inside the container - --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" - --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) - --expose=[]: Expose a port from the container without publishing it to your host - --link="": Add link to another container (name:alias) - --name="": Assign the specified name to the container. If no name is specific docker will generate a random name - -P, --publish-all=false: Publish all exposed ports to the host interfaces +The `docker run` command first `creates` a writeable container layer over the +specified image, and then `starts` it using the specified command. That is, +`docker run` is equivalent to the API `/containers/create` then +`/containers/(id)/start`. A stopped container can be restarted with all its +previous changes intact using `docker start`. See `docker ps -a` to view a list +of all containers. -The `docker run` command first `creates` -a writeable container layer over the specified image, and then -`starts` it using the specified command. That is, -`docker run` is equivalent to the API -`/containers/create` then -`/containers/(id)/start`. A stopped container can be -restarted with all its previous changes intact using -`docker start`. See `docker ps -a` -to view a list of all containers. - -The `docker run` command can be used in combination -with `docker commit` to [*change the command that a -container runs*](#commit-an-existing-container). +The `docker run` command can be used in combination with `docker commit` to +[*change the command that a container runs*](#commit-an-existing-container). See [*Redirect Ports*](../../../use/port_redirection/#port-redirection) -for more detailed information about the `--expose`, -`-p`, `-P` and -`--link` parameters, and [*Link -Containers*](../../../use/working_with_links_names/#working-with-links-names) -for specific examples using `--link`. +for more detailed information about the `--expose`, `-p`, `-P` and `--link` +parameters, and [*Link Containers*]( +../../../use/working_with_links_names/#working-with-links-names) for specific +examples using `--link`. ### Known Issues (run –volumes-from) -- [Issue 2702](https://github.com/dotcloud/docker/issues/2702): - "lxc-start: Permission denied - failed to mount" could indicate a - permissions problem with AppArmor. Please see the issue for a - workaround. +- [Issue 2702](https://github.com/dotcloud/docker/issues/2702): + "lxc-start: Permission denied - failed to mount" could indicate a + permissions problem with AppArmor. Please see the issue for a + workaround. ### Examples: $ sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" -This will create a container and print `test` to the -console. The `cidfile` flag makes Docker attempt to -create a new file and write the container ID to it. If the file exists -already, Docker will return an error. Docker will close this file when -`docker run` exits. +This will create a container and print `test` to the console. The `cidfile` +flag makes Docker attempt to create a new file and write the container ID to it. +If the file exists already, Docker will return an error. Docker will close this +file when `docker run` exits. 
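+
+As a small follow-up sketch (the file path and the detached command here are
+only illustrative), the ID written to the `cidfile` can be fed back into other
+commands to act on the same container later:
+
+    $ sudo docker run --cidfile /tmp/docker_test.cid -d ubuntu sleep 60
+    $ sudo docker stop $(cat /tmp/docker_test.cid)
+    $ sudo docker rm $(cat /tmp/docker_test.cid)
+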
$ sudo docker run -t -i --rm ubuntu bash root@bc338942ef20:/# mount -t tmpfs none /mnt mount: permission denied -This will *not* work, because by default, most potentially dangerous -kernel capabilities are dropped; including `cap_sys_admin` -(which is required to mount filesystems). However, the -`--privileged` flag will allow it to run: +This will *not* work, because by default, most potentially dangerous kernel +capabilities are dropped; including `cap_sys_admin` (which is required to mount +filesystems). However, the `--privileged` flag will allow it to run: $ sudo docker run --privileged ubuntu bash root@50e3f57e16e6:/# mount -t tmpfs none /mnt @@ -889,30 +879,27 @@ kernel capabilities are dropped; including `cap_sys_admin` Filesystem Size Used Avail Use% Mounted on none 1.9G 0 1.9G 0% /mnt -The `--privileged` flag gives *all* capabilities to -the container, and it also lifts all the limitations enforced by the -`device` cgroup controller. In other words, the -container can then do almost everything that the host can do. This flag -exists to allow special use-cases, like running Docker within Docker. +The `--privileged` flag gives *all* capabilities to the container, and it also +lifts all the limitations enforced by the `device` cgroup controller. In other +words, the container can then do almost everything that the host can do. This +flag exists to allow special use-cases, like running Docker within Docker. $ sudo docker run -w /path/to/dir/ -i -t ubuntu pwd -The `-w` lets the command being executed inside -directory given, here `/path/to/dir/`. If the path -does not exists it is created inside the container. +The `-w` lets the command being executed inside directory given, here +`/path/to/dir/`. If the path does not exists it is created inside the container. $ sudo docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd -The `-v` flag mounts the current working directory -into the container. The `-w` lets the command being -executed inside the current working directory, by changing into the -directory to the value returned by `pwd`. So this +The `-v` flag mounts the current working directory into the container. The `-w` +lets the command being executed inside the current working directory, by +changing into the directory to the value returned by `pwd`. So this combination executes the command using the container, but inside the current working directory. $ sudo docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash -When the host directory of a bind-mounted volume doesn’t exist, Docker +When the host directory of a bind-mounted volume doesn't exist, Docker will automatically create this directory on the host for you. In the example above, Docker will create the `/doesnt/exist` folder before starting your container. @@ -920,49 +907,43 @@ folder before starting your container. $ sudo docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v ./static-docker:/usr/bin/docker busybox sh By bind-mounting the docker unix socket and statically linked docker -binary (such as that provided by -[https://get.docker.io](https://get.docker.io)), you give the container -the full access to create and manipulate the host’s docker daemon. +binary (such as that provided by [https://get.docker.io]( +https://get.docker.io)), you give the container the full access to create and +manipulate the host's docker daemon. $ sudo docker run -p 127.0.0.1:80:8080 ubuntu bash -This binds port `8080` of the container to port -`80` on `127.0.0.1` of the host -machine. 
[*Redirect -Ports*](../../../use/port_redirection/#port-redirection) explains in -detail how to manipulate ports in Docker. +This binds port `8080` of the container to port `80` on `127.0.0.1` of the host +machine. [*Redirect Ports*](../../../use/port_redirection/#port-redirection) +explains in detail how to manipulate ports in Docker. $ sudo docker run --expose 80 ubuntu bash -This exposes port `80` of the container for use -within a link without publishing the port to the host system’s -interfaces. [*Redirect -Ports*](../../../use/port_redirection/#port-redirection) explains in -detail how to manipulate ports in Docker. +This exposes port `80` of the container for use within a link without publishing +the port to the host system's interfaces. [*Redirect Ports*]( +../../../use/port_redirection/#port-redirection) explains in detail how to +manipulate ports in Docker. $ sudo docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash -This sets environmental variables in the container. For illustration all -three flags are shown here. Where `-e`, -`--env` take an environment variable and value, or -if no "=" is provided, then that variable’s current value is passed -through (i.e. $MYVAR1 from the host is set to $MYVAR1 in the -container). All three flags, `-e`, `--env` -and `--env-file` can be repeated. +This sets environmental variables in the container. For illustration all three +flags are shown here. Where `-e`, `--env` take an environment variable and +value, or if no "=" is provided, then that variable's current value is passed +through (i.e. $MYVAR1 from the host is set to $MYVAR1 in the container). All +three flags, `-e`, `--env` and `--env-file` can be repeated. -Regardless of the order of these three flags, the `--env-file` -are processed first, and then `-e`, `--env` flags. This way, the -`-e` or `--env` will override variables as needed. +Regardless of the order of these three flags, the `--env-file` are processed +first, and then `-e`, `--env` flags. This way, the `-e` or `--env` will +override variables as needed. $ cat ./env.list TEST_FOO=BAR $ sudo docker run --env TEST_FOO="This is a test" --env-file ./env.list busybox env | grep TEST_FOO TEST_FOO=This is a test -The `--env-file` flag takes a filename as an -argument and expects each line to be in the VAR=VAL format, mimicking -the argument passed to `--env`. Comment lines need -only be prefixed with `#` +The `--env-file` flag takes a filename as an argument and expects each line +to be in the VAR=VAL format, mimicking the argument passed to `--env`. Comment +lines need only be prefixed with `#` An example of a file passed with `--env-file` @@ -991,48 +972,44 @@ This will create and run a new container with the container name being $ sudo docker run --link /redis:redis --name console ubuntu bash -The `--link` flag will link the container named -`/redis` into the newly created container with the -alias `redis`. The new container can access the -network and environment of the redis container via environment -variables. The `--name` flag will assign the name -`console` to the newly created container. +The `--link` flag will link the container named `/redis` into the newly +created container with the alias `redis`. The new container can access the +network and environment of the redis container via environment variables. +The `--name` flag will assign the name `console` to the newly created +container. 
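+
+To check that the link is in place, you can list the environment variables the
+new container was given (a sketch; only a couple of the variables are shown and
+the values are illustrative, they depend on the redis container's address):
+
+    $ sudo docker run --rm --link /redis:redis ubuntu env | grep REDIS
+    REDIS_PORT=tcp://172.17.0.136:6379
+    REDIS_PORT_6379_TCP_ADDR=172.17.0.136
+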
$ sudo docker run --volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd -The `--volumes-from` flag mounts all the defined -volumes from the referenced containers. Containers can be specified by a -comma separated list or by repetitions of the `--volumes-from` -argument. The container ID may be optionally suffixed with -`:ro` or `:rw` to mount the -volumes in read-only or read-write mode, respectively. By default, the -volumes are mounted in the same mode (read write or read only) as the -reference container. +The `--volumes-from` flag mounts all the defined volumes from the referenced +containers. Containers can be specified by a comma separated list or by +repetitions of the `--volumes-from` argument. The container ID may be +optionally suffixed with `:ro` or `:rw` to mount the volumes in read-only +or read-write mode, respectively. By default, the volumes are mounted in +the same mode (read write or read only) as the reference container. -The `-a` flag tells `docker run` -to bind to the container’s stdin, stdout or stderr. This makes it -possible to manipulate the output and input as needed. +The `-a` flag tells `docker run` to bind to the container'sstdin, stdout or +stderr. This makes it possible to manipulate the output and input as needed. $ sudo echo "test" | docker run -i -a stdin ubuntu cat - -This pipes data into a container and prints the container’s ID by -attaching only to the container’s stdin. +This pipes data into a container and prints the container's ID by attaching +only to the container'sstdin. $ sudo docker run -a stderr ubuntu echo test -This isn’t going to print anything unless there’s an error because we’ve -only attached to the stderr of the container. The container’s logs still -store what’s been written to stderr and stdout. +This isn't going to print anything unless there's an error because We've +only attached to the stderr of the container. The container's logs still + store what's been written to stderr and stdout. $ sudo cat somefile | docker run -i -a stdin mybuilder dobuild This is how piping a file into a container could be done for a build. -The container’s ID will be printed after the build is done and the build +The container's ID will be printed after the build is done and the build logs could be retrieved using `docker logs`. This is useful if you need to pipe a file or something else into a container and -retrieve the container’s ID once the container has finished running. +retrieve the container's ID once the container has finished running. -#### A complete example +**A complete example:** $ sudo docker run -d --name static static-web-files sh $ sudo docker run -d --expose=8098 --name riak riakserver @@ -1043,45 +1020,33 @@ retrieve the container’s ID once the container has finished running. This example shows 5 containers that might be set up to test a web application change: -1. Start a pre-prepared volume image `static-web-files` - (in the background) that has CSS, image and static HTML in - it, (with a `VOLUME` instruction in the - `Dockerfile` to allow the web server to use - those files); -2. Start a pre-prepared `riakserver` image, give - the container name `riak` and expose port - `8098` to any containers that link to it; -3. Start the `appserver` image, restricting its - memory usage to 100MB, setting two environment variables - `DEVELOPMENT` and `BRANCH` - and bind-mounting the current directory (`$(pwd)` -) in the container in read-only mode as - `/app/bin`; -4. 
Start the `webserver`, mapping port - `443` in the container to port `1443` - on the Docker server, setting the DNS server to - `dns.dev.org` and DNS search domain to - `dev.org`, creating a volume to put the log - files into (so we can access it from another container), then - importing the files from the volume exposed by the - `static` container, and linking to all exposed - ports from `riak` and `app`. - Lastly, we set the hostname to `web.sven.dev.org` - so its consistent with the pre-generated SSL certificate; -5. Finally, we create a container that runs - `tail -f access.log` using the logs volume from - the `web` container, setting the workdir to - `/var/log/httpd`. The `--rm` - option means that when the container exits, the container’s layer is - removed. +1. Start a pre-prepared volume image `static-web-files` (in the background) + that has CSS, image and static HTML in it, (with a `VOLUME` instruction in + the Dockerfile to allow the web server to use those files); +2. Start a pre-prepared `riakserver` image, give the container name `riak` and + expose port `8098` to any containers that link to it; +3. Start the `appserver` image, restricting its memory usage to 100MB, setting + two environment variables `DEVELOPMENT` and `BRANCH` and bind-mounting the + current directory (`$(pwd)`) in the container in read-only mode as `/app/bin`; +4. Start the `webserver`, mapping port `443` in the container to port `1443` on + the Docker server, setting the DNS server to `dns.dev.org` and DNS search + domain to `dev.org`, creating a volume to put the log files into (so we can + access it from another container), then importing the files from the volume + exposed by the `static` container, and linking to all exposed ports from + `riak` and `app`. Lastly, we set the hostname to `web.sven.dev.org` so its + consistent with the pre-generated SSL certificate; +5. Finally, we create a container that runs `tail -f access.log` using the logs + volume from the `web` container, setting the workdir to `/var/log/httpd`. The + `--rm` option means that when the container exits, the container's layer is + removed. -## `save` +## save + +Save an image to a tar archive (streamed to stdout by default) Usage: docker save IMAGE - Save an image to a tar archive (streamed to stdout by default) - - -o, --output="": Write to an file, instead of STDOUT + -o, --output="": Write to an file, instead of STDOUT Produces a tarred repository to the standard output stream. Contains all parent layers, and all tags + versions, or specified repo:tag. @@ -1098,65 +1063,65 @@ It is used to create a backup that can then be used with $ sudo docker save -o fedora-all.tar fedora $ sudo docker save -o fedora-latest.tar fedora:latest -## `search` +## search + +Search the docker index for images Usage: docker search TERM - Search the docker index for images - --no-trunc=false: Don᾿t truncate output -s, --stars=0: Only displays with at least xxx stars -t, --trusted=false: Only show trusted builds -See [*Find Public Images on the Central -Index*](../../../use/workingwithrepository/#searching-central-index) for +See [*Find Public Images on the Central Index*]( +../../../use/workingwithrepository/#searching-central-index) for more details on finding shared images from the commandline. 
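+
+For example, the following searches for repositories matching `ubuntu` that
+have at least ten stars (the term and the star threshold are only
+illustrative):
+
+    $ sudo docker search -s 10 ubuntu
+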
-## `start` +## start + +Start a stopped container Usage: docker start [OPTIONS] CONTAINER - Start a stopped container - -a, --attach=false: Attach container᾿s stdout/stderr and forward all signals to the process -i, --interactive=false: Attach container᾿s stdin -## `stop` +## stop + +Stop a running container (Send SIGTERM, and then SIGKILL after grace period) Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] - Stop a running container (Send SIGTERM, and then SIGKILL after grace period) - - -t, --time=10: Number of seconds to wait for the container to stop before killing it. + -t, --time=10: Number of seconds to wait for the container to stop before killing it. The main process inside the container will receive SIGTERM, and after a grace period, SIGKILL -## `tag` +## tag + +Tag an image into a repository Usage: docker tag [OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG] - Tag an image into a repository - - -f, --force=false: Force + -f, --force=false: Force You can group your images together using names and tags, and then upload -them to [*Share Images via -Repositories*](../../../use/workingwithrepository/#working-with-the-repository). +them to [*Share Images via Repositories*]( +../../../use/workingwithrepository/#working-with-the-repository). -## `top` +## top Usage: docker top CONTAINER [ps OPTIONS] - Lookup the running processes of a container +Lookup the running processes of a container -## `version` +## version Show the version of the Docker client, daemon, and latest released version. -## `wait` +## wait Usage: docker wait [OPTIONS] NAME - Block until a container stops, then print its exit code. +Block until a container stops, then print its exit code. \ No newline at end of file diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 236b8065b8..f6f132a09d 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -2,7 +2,7 @@ page_title: Docker Run Reference page_description: Configure containers at runtime page_keywords: docker, run, configure, runtime -# [Docker Run Reference](#id2) +# Docker Run Reference **Docker runs processes in isolated containers**. When an operator executes `docker run`, she starts a process with its @@ -10,59 +10,60 @@ own file system, its own networking, and its own isolated process tree. The [*Image*](../../terms/image/#image-def) which starts the process may define defaults related to the binary to run, the networking to expose, and more, but `docker run` gives final control to -the operator who starts the container from the image. That’s the main -reason [*run*](../commandline/cli/#cli-run) has more options than any +the operator who starts the container from the image. That's the main +reason [*run*](../../commandline/cli/#cli-run) has more options than any other `docker` command. Every one of the [*Examples*](../../examples/#example-list) shows running containers, and so here we try to give more in-depth guidance. -## [General Form](#id3) +## General Form -As you’ve seen in the [*Examples*](../../examples/#example-list), the +As you`ve seen in the [*Examples*](../../examples/#example-list), the basic run command takes this form: docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] To learn how to interpret the types of `[OPTIONS]`, -see [*Option types*](../commandline/cli/#cli-options). +see [*Option types*](../../commandline/cli/#cli-options). The list of `[OPTIONS]` breaks down into two groups: -1. 
Settings exclusive to operators, including: - - Detached or Foreground running, - - Container Identification, - - Network settings, and - - Runtime Constraints on CPU and Memory - - Privileges and LXC Configuration +1. Settings exclusive to operators, including: -2. Setting shared between operators and developers, where operators can - override defaults developers set in images at build time. + - Detached or Foreground running, + - Container Identification, + - Network settings, and + - Runtime Constraints on CPU and Memory + - Privileges and LXC Configuration + +2. Setting shared between operators and developers, where operators can + override defaults developers set in images at build time. Together, the `docker run [OPTIONS]` give complete control over runtime behavior to the operator, allowing them to override all defaults set by the developer during `docker build` and nearly all the defaults set by the Docker runtime itself. -## [Operator Exclusive Options](#id4) +## Operator Exclusive Options Only the operator (the person executing `docker run`) can set the following options. -- [Detached vs Foreground](#detached-vs-foreground) - - [Detached (-d)](#detached-d) - - [Foreground](#foreground) -- [Container Identification](#container-identification) - - [Name (–name)](#name-name) - - [PID Equivalent](#pid-equivalent) -- [Network Settings](#network-settings) -- [Clean Up (–rm)](#clean-up-rm) -- [Runtime Constraints on CPU and + - [Detached vs Foreground](#detached-vs-foreground) + - [Detached (-d)](#detached-d) + - [Foreground](#foreground) + - [Container Identification](#container-identification) + - [Name (–name)](#name-name) + - [PID Equivalent](#pid-equivalent) + - [Network Settings](#network-settings) + - [Clean Up (–rm)](#clean-up-rm) + - [Runtime Constraints on CPU and Memory](#runtime-constraints-on-cpu-and-memory) -- [Runtime Privilege and LXC + - [Runtime Privilege and LXC Configuration](#runtime-privilege-and-lxc-configuration) -### [Detached vs Foreground](#id2) +## Detached vs Foreground When starting a Docker container, you must first decide if you want to run the container in the background in a "detached" mode or in the @@ -70,53 +71,50 @@ default foreground mode: -d=false: Detached mode: Run container in the background, print new container id -#### [Detached (-d)](#id3) +### Detached (-d) In detached mode (`-d=true` or just `-d`), all I/O should be done through network connections or shared volumes because the container is no longer listening to the commandline where you executed `docker run`. You can reattach to a detached container with `docker` -[*attach*](../commandline/cli/#cli-attach). If you choose to run a +[*attach*](commandline/cli/#attach). If you choose to run a container in the detached mode, then you cannot use the `--rm` option. -#### [Foreground](#id4) +### Foreground -In foreground mode (the default when `-d` is not -specified), `docker run` can start the process in -the container and attach the console to the process’s standard input, -output, and standard error. It can even pretend to be a TTY (this is -what most commandline executables expect) and pass along signals. All of -that is configurable: +In foreground mode (the default when `-d` is not specified), `docker run` +can start the process in the container and attach the console to the process's +standard input, output, and standard error. It can even pretend to be a TTY +(this is what most commandline executables expect) and pass along signals. 
All +of that is configurable: -a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr`` -t=false : Allocate a pseudo-tty --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) -i=false : Keep STDIN open even if not attached -If you do not specify `-a` then Docker will [attach -everything -(stdin,stdout,stderr)](https://github.com/dotcloud/docker/blob/75a7f4d90cde0295bcfb7213004abce8d4779b75/commands.go#L1797). -You can specify to which of the three standard streams -(`stdin`, `stdout`, -`stderr`) you’d like to connect instead, as in: +If you do not specify `-a` then Docker will [attach everything (stdin,stdout,stderr)]( +https://github.com/dotcloud/docker/blob/ +75a7f4d90cde0295bcfb7213004abce8d4779b75/commands.go#L1797). You can specify to which +of the three standard streams (`stdin`, `stdout`, `stderr`) you'd like to connect +instead, as in: docker run -a stdin -a stdout -i -t ubuntu /bin/bash -For interactive processes (like a shell) you will typically want a tty -as well as persistent standard input (`stdin`), so -you’ll use `-i -t` together in most interactive -cases. +For interactive processes (like a shell) you will typically want a tty as well as +persistent standard input (`stdin`), so you'll use `-i -t` together in most +interactive cases. -### [Container Identification](#id5) +## Container Identification -#### [Name (–name)](#id6) +### Name (–name) The operator can identify a container in three ways: - UUID long identifier ("f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778") - UUID short identifier ("f78375b1c487") -- Name ("evil\_ptolemy") +- Name ("evil_ptolemy") The UUID identifiers come from the Docker daemon, and if you do not assign a name to the container with `--name` then @@ -127,16 +125,16 @@ name when defining (or any other place you need to identify a container). This works for both background and foreground Docker containers. -#### [PID Equivalent](#id7) +### PID Equivalent And finally, to help with automation, you can have Docker write the container ID out to a file of your choosing. This is similar to how some -programs might write out their process ID to a file (you’ve seen them as +programs might write out their process ID to a file (you`ve seen them as PID files): --cidfile="": Write the container ID to the file -### [Network Settings](#id8) +## Network Settings -n=true : Enable networking for this container --dns=[] : Set custom dns servers for the container @@ -150,19 +148,19 @@ files or STDIN/STDOUT only. Your container will use the same DNS servers as the host by default, but you can override this with `--dns`. -### [Clean Up (–rm)](#id9) +## Clean Up (–rm) -By default a container’s file system persists even after the container +By default a container's file system persists even after the container exits. This makes debugging a lot easier (since you can inspect the final state) and you retain all your data by default. But if you are running short-term **foreground** processes, these container file -systems can really pile up. If instead you’d like Docker to +systems can really pile up. 
If instead you'd like Docker to **automatically clean up the container and remove the file system when the container exits**, you can add the `--rm` flag: --rm=false: Automatically remove the container when it exits (incompatible with -d) -### [Runtime Constraints on CPU and Memory](#id10) +## Runtime Constraints on CPU and Memory The operator can also adjust the performance parameters of the container: @@ -181,7 +179,7 @@ the same priority and get the same proportion of CPU cycles, but you can tell the kernel to give more shares of CPU time to one or more containers when you start them via Docker. -### [Runtime Privilege and LXC Configuration](#id11) +## Runtime Privilege and LXC Configuration --privileged=false: Give extended privileges to this container --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" @@ -189,71 +187,63 @@ containers when you start them via Docker. By default, Docker containers are "unprivileged" and cannot, for example, run a Docker daemon inside a Docker container. This is because by default a container is not allowed to access any devices, but a -"privileged" container is given access to all devices (see -[lxc-template.go](https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go) -and documentation on [cgroups -devices](https://www.kernel.org/doc/Documentation/cgroups/devices.txt)). +"privileged" container is given access to all devices (see [lxc-template.go]( +https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go) +and documentation on [cgroups devices]( +https://www.kernel.org/doc/Documentation/cgroups/devices.txt)). When the operator executes `docker run --privileged`, Docker will enable to access to all devices on the host as well as set some configuration in AppArmor to allow the container nearly all the same access to the host as processes running outside containers on the host. Additional information about running with `--privileged` is available on the -[Docker -Blog](http://blog.docker.io/2013/09/docker-can-now-run-within-docker/). +[Docker Blog](http://blog.docker.io/2013/09/docker-can-now-run-within-docker/). -If the Docker daemon was started using the `lxc` -exec-driver (`docker -d --exec-driver=lxc`) then the -operator can also specify LXC options using one or more -`--lxc-conf` parameters. These can be new parameters -or override existing parameters from the -[lxc-template.go](https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go). -Note that in the future, a given host’s Docker daemon may not use LXC, -so this is an implementation-specific configuration meant for operators -already familiar with using LXC directly. +If the Docker daemon was started using the `lxc` exec-driver +(`docker -d --exec-driver=lxc`) then the operator can also specify LXC options +using one or more `--lxc-conf` parameters. These can be new parameters or +override existing parameters from the [lxc-template.go]( +https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go). +Note that in the future, a given host's docker daemon may not use LXC, so this +is an implementation-specific configuration meant for operators already +familiar with using LXC directly. 
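+
+As a sketch, assuming the daemon really was started with the `lxc` exec-driver,
+the example option shown above pins a container to the first two CPUs:
+
+    $ sudo docker run --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -i -t ubuntu /bin/bash
+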
-## Overriding `Dockerfile` Image Defaults +## Overriding Dockerfile Image Defaults -When a developer builds an image from a -[*Dockerfile*](../builder/#dockerbuilder) or when she commits it, the -developer can set a number of default parameters that take effect when -the image starts up as a container. +When a developer builds an image from a [*Dockerfile*](builder/#dockerbuilder) +or when she commits it, the developer can set a number of default parameters +that take effect when the image starts up as a container. -Four of the `Dockerfile` commands cannot be -overridden at runtime: `FROM, MAINTAINER, RUN`, and -`ADD`. Everything else has a corresponding override -in `docker run`. We’ll go through what the developer -might have set in each `Dockerfile` instruction and -how the operator can override that setting. +Four of the Dockerfile commands cannot be overridden at runtime: `FROM`, +`MAINTAINER`, `RUN`, and `ADD`. Everything else has a corresponding override +in `docker run`. We'll go through what the developer might have set in each +Dockerfile instruction and how the operator can override that setting. -- [CMD (Default Command or Options)](#cmd-default-command-or-options) -- [ENTRYPOINT (Default Command to Execute at - Runtime](#entrypoint-default-command-to-execute-at-runtime) -- [EXPOSE (Incoming Ports)](#expose-incoming-ports) -- [ENV (Environment Variables)](#env-environment-variables) -- [VOLUME (Shared Filesystems)](#volume-shared-filesystems) -- [USER](#user) -- [WORKDIR](#workdir) + - [CMD (Default Command or Options)](#cmd-default-command-or-options) + - [ENTRYPOINT (Default Command to Execute at Runtime]( + #entrypoint-default-command-to-execute-at-runtime) + - [EXPOSE (Incoming Ports)](#expose-incoming-ports) + - [ENV (Environment Variables)](#env-environment-variables) + - [VOLUME (Shared Filesystems)](#volume-shared-filesystems) + - [USER](#user) + - [WORKDIR](#workdir) -### [CMD (Default Command or Options)](#id12) +## CMD (Default Command or Options) Recall the optional `COMMAND` in the Docker commandline: docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] -This command is optional because the person who created the -`IMAGE` may have already provided a default -`COMMAND` using the `Dockerfile` -`CMD`. As the operator (the person running a -container from the image), you can override that `CMD` -just by specifying a new `COMMAND`. +This command is optional because the person who created the `IMAGE` may have +already provided a default `COMMAND` using the Dockerfile `CMD`. As the +operator (the person running a container from the image), you can override that +`CMD` just by specifying a new `COMMAND`. -If the image also specifies an `ENTRYPOINT` then the -`CMD` or `COMMAND` get appended -as arguments to the `ENTRYPOINT`. +If the image also specifies an `ENTRYPOINT` then the `CMD` or `COMMAND` get +appended as arguments to the `ENTRYPOINT`. 
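+
+For example, whatever `CMD` the `ubuntu` image declares, the operator can still
+run a one-off command in its place (the command below is only illustrative):
+
+    $ sudo docker run ubuntu /bin/echo this overrides the default CMD
+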

-### [ENTRYPOINT (Default Command to Execute at Runtime](#id13)
+## ENTRYPOINT (Default Command to Execute at Runtime)

    --entrypoint="": Overwrite the default entrypoint set by the image

@@ -276,13 +266,12 @@ or two examples of how to pass more parameters to that ENTRYPOINT:
    docker run -i -t --entrypoint /bin/bash example/redis -c ls -l
    docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help

-### [EXPOSE (Incoming Ports)](#id14)
+## EXPOSE (Incoming Ports)

-The `Dockerfile` doesn’t give much control over
-networking, only providing the `EXPOSE` instruction
-to give a hint to the operator about what incoming ports might provide
-services. The following options work with or override the
-`Dockerfile`‘s exposed defaults:
+The Dockerfile doesn't give much control over networking, only providing the
+`EXPOSE` instruction to give a hint to the operator about what incoming ports
+might provide services. The following options work with or override the
+Dockerfile's exposed defaults:

    --expose=[]: Expose a port from the container without publishing it to your host

@@ -293,40 +282,34 @@ services. The following options work with or override the
    (use 'docker port' to see the actual mapping)
    --link="" : Add link to another container (name:alias)

-As mentioned previously, `EXPOSE` (and
-`--expose`) make a port available **in** a container
-for incoming connections. The port number on the inside of the container
-(where the service listens) does not need to be the same number as the
-port exposed on the outside of the container (where clients connect), so
-inside the container you might have an HTTP service listening on port 80
-(and so you `EXPOSE 80` in the
-`Dockerfile`), but outside the container the port
-might be 42800.
+As mentioned previously, `EXPOSE` (and `--expose`) make a port available **in**
+a container for incoming connections. The port number on the inside of the
+container (where the service listens) does not need to be the same number as the
+port exposed on the outside of the container (where clients connect), so inside
+the container you might have an HTTP service listening on port 80 (and so you
+`EXPOSE 80` in the Dockerfile), but outside the container the port might be
+42800.

-To help a new client container reach the server container’s internal
-port operator `--expose`‘d by the operator or
-`EXPOSE`‘d by the developer, the operator has three
-choices: start the server container with `-P` or
-`-p,` or start the client container with
-`--link`.
+To help a new client container reach the server container's internal port,
+`--expose`'d by the operator or `EXPOSE`'d by the developer, the operator has
+three choices: start the server container with `-P` or `-p`, or start the
+client container with `--link`.

-If the operator uses `-P` or `-p`
-then Docker will make the exposed port accessible on the host
-and the ports will be available to any client that can reach the host.
-To find the map between the host ports and the exposed ports, use
-`docker port`)
+If the operator uses `-P` or `-p` then Docker will make the exposed port
+accessible on the host and the ports will be available to any client that
+can reach the host. To find the map between the host ports and the exposed
+ports, use `docker port`.

-If the operator uses `--link` when starting the new
-client container, then the client container can access the exposed port
-via a private networking interface. 
Docker will set some environment -variables in the client container to help indicate which interface and -port to use. +If the operator uses `--link` when starting the new client container, then the +client container can access the exposed port via a private networking interface. +Docker will set some environment variables in the client container to help +indicate which interface and port to use. -### [ENV (Environment Variables)](#id15) +## ENV (Environment Variables) -The operator can **set any environment variable** in the container by -using one or more `-e` flags, even overriding those -already defined by the developer with a Dockefile `ENV`: +The operator can **set any environment variable** in the container by using one +or more `-e` flags, even overriding those already defined by the developer with +a Dockefile `ENV`: $ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export declare -x HOME="/" @@ -340,10 +323,10 @@ already defined by the developer with a Dockefile `ENV`: Similarly the operator can set the **hostname** with `-h`. -`--link name:alias` also sets environment variables, -using the *alias* string to define environment variables within the -container that give the IP and PORT information for connecting to the -service container. Let’s imagine we have a container running Redis: +`--link name:alias` also sets environment variables, using the *alias* string to +define environment variables within the container that give the IP and PORT +information for connecting to the service container. Let's imagine we have a +container running Redis: # Start the service container, named redis-name $ docker run -d --name redis-name dockerfiles/redis @@ -358,7 +341,7 @@ service container. Let’s imagine we have a container running Redis: $ docker port 4241164edf6f 6379 2014/01/25 00:55:38 Error: No public port '6379' published for 4241164edf6f -Yet we can get information about the Redis container’s exposed ports +Yet we can get information about the Redis container'sexposed ports with `--link`. Choose an alias that will form a valid environment variable! @@ -377,40 +360,36 @@ valid environment variable! declare -x SHLVL="1" declare -x container="lxc" -And we can use that information to connect from another container as a -client: +And we can use that information to connect from another container as a client: $ docker run -i -t --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT' 172.17.0.32:6379> -### [VOLUME (Shared Filesystems)](#id16) +## VOLUME (Shared Filesystems) -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume. --volumes-from="": Mount all volumes from the given container(s) -The volumes commands are complex enough to have their own documentation -in section [*Share Directories via -Volumes*](../../use/working_with_volumes/#volume-def). A developer can -define one or more `VOLUME`s associated with an -image, but only the operator can give access from one container to -another (or from a container to a volume mounted on the host). +The volumes commands are complex enough to have their own documentation in +section [*Share Directories via Volumes*](../../use/working_with_volumes/#volume-def). 
+A developer can define one or more `VOLUME's associated with an image, but only the +operator can give access from one container to another (or from a container to a +volume mounted on the host). -### [USER](#id17) +## USER -The default user within a container is `root` (id = -0), but if the developer created additional users, those are accessible -too. The developer can set a default user to run the first process with -the `Dockerfile USER` command, but the operator can -override it +The default user within a container is `root` (id = 0), but if the developer +created additional users, those are accessible too. The developer can set a +default user to run the first process with the `Dockerfile USER` command, +but the operator can override it: -u="": Username or UID -### [WORKDIR](#id18) +## WORKDIR -The default working directory for running binaries within a container is -the root directory (`/`), but the developer can set -a different default with the `Dockerfile WORKDIR` -command. The operator can override this with: +The default working directory for running binaries within a container is the +root directory (`/`), but the developer can set a different default with the +Dockerfile `WORKDIR` command. The operator can override this with: -w="": Working directory inside the container diff --git a/docs/sources/terms.md b/docs/sources/terms.md index 59579d99a1..228b18fbd9 100644 --- a/docs/sources/terms.md +++ b/docs/sources/terms.md @@ -4,10 +4,10 @@ ## Contents: -- [File System](filesystem/) -- [Layers](layer/) -- [Image](image/) -- [Container](container/) -- [Registry](registry/) -- [Repository](repository/) + - [File System](filesystem/) + - [Layers](layer/) + - [Image](image/) + - [Container](container/) + - [Registry](registry/) + - [Repository](repository/) diff --git a/docs/sources/terms/container.md b/docs/sources/terms/container.md index 92a0265d99..d7f139a3ca 100644 --- a/docs/sources/terms/container.md +++ b/docs/sources/terms/container.md @@ -6,22 +6,20 @@ page_keywords: containers, lxc, concepts, explanation, image, container ## Introduction -![](../../_images/docker-filesystems-busyboxrw.png) +![](../../static_files/docker-filesystems-busyboxrw.png) -Once you start a process in Docker from an -[*Image*](../image/#image-def), Docker fetches the image and its -[*Parent Image*](../image/#parent-image-def), and repeats the process -until it reaches the [*Base Image*](../image/#base-image-def). Then the -[*Union File System*](../layer/#ufs-def) adds a read-write layer on top. -That read-write layer, plus the information about its [*Parent -Image*](../image/#parent-image-def) and some additional information like -its unique id, networking configuration, and resource limits is called a -**container**. +Once you start a process in Docker from an [*Image*](image.md), Docker fetches +the image and its [*Parent Image*](image.md), and repeats the process until it +reaches the [*Base Image*](image.md/#base-image-def). Then the +[*Union File System*](layer.md) adds a read-write layer on top. That read-write +layer, plus the information about its [*Parent Image*](image.md) and some +additional information like its unique id, networking configuration, and +resource limits is called a **container**. ## Container State -Containers can change, and so they have state. A container may be -**running** or **exited**. +Containers can change, and so they have state. A container may be **running** or +**exited**. 
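+
+For example (a sketch; the name is illustrative), running a short command
+leaves behind an exited container that `docker ps` no longer shows but
+`docker ps -a` still lists:
+
+    $ sudo docker run --name shortlived ubuntu echo done
+    done
+    $ sudo docker ps -a
+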
When a container is running, the idea of a "container" also includes a tree of processes running on the CPU, isolated from the other processes @@ -33,9 +31,8 @@ processes restart from scratch (their memory state is **not** preserved in a container), but the file system is just as it was when the container was stopped. -You can promote a container to an [*Image*](../image/#image-def) with -`docker commit`. Once a container is an image, you -can use it as a parent for new containers. +You can promote a container to an [*Image*](image.md) with `docker commit`. +Once a container is an image, you can use it as a parent for new containers. ## Container IDs diff --git a/docs/sources/terms/filesystem.md b/docs/sources/terms/filesystem.md index 2038d009e3..07f75e361e 100644 --- a/docs/sources/terms/filesystem.md +++ b/docs/sources/terms/filesystem.md @@ -6,13 +6,13 @@ page_keywords: containers, files, linux ## Introduction -![](../../_images/docker-filesystems-generic.png) +![](../../static_files/docker-filesystems-generic.png) In order for a Linux system to run, it typically needs two [file systems](http://en.wikipedia.org/wiki/Filesystem): -1. boot file system (bootfs) -2. root file system (rootfs) +1. boot file system (bootfs) +2. root file system (rootfs) The **boot file system** contains the bootloader and the kernel. The user never makes any changes to the boot file system. In fact, soon @@ -22,10 +22,9 @@ initrd disk image. The **root file system** includes the typical directory structure we associate with Unix-like operating systems: -`/dev, /proc, /bin, /etc, /lib, /usr,` and -`/tmp` plus all the configuration files, binaries -and libraries required to run user applications (like bash, ls, and so -forth). +`/dev, /proc, /bin, /etc, /lib, /usr,` and `/tmp` plus all the configuration +files, binaries and libraries required to run user applications (like bash, +ls, and so forth). While there can be important kernel differences between different Linux distributions, the contents and organization of the root file system are @@ -33,4 +32,4 @@ usually what make your software packages dependent on one distribution versus another. Docker can help solve this problem by running multiple distributions at the same time. -![](../../_images/docker-filesystems-multiroot.png) +![](../../static_files/docker-filesystems-multiroot.png) diff --git a/docs/sources/terms/image.md b/docs/sources/terms/image.md index 721d4c954c..031dd2e978 100644 --- a/docs/sources/terms/image.md +++ b/docs/sources/terms/image.md @@ -6,7 +6,7 @@ page_keywords: containers, lxc, concepts, explanation, image, container ## Introduction -![](../../_images/docker-filesystems-debian.png) +![](../../static_files/docker-filesystems-debian.png) In Docker terminology, a read-only [*Layer*](../layer/#layer-def) is called an **image**. An image never changes. @@ -14,14 +14,14 @@ called an **image**. An image never changes. Since Docker uses a [*Union File System*](../layer/#ufs-def), the processes think the whole file system is mounted read-write. But all the changes go to the top-most writeable layer, and underneath, the original -file in the read-only image is unchanged. Since images don’t change, +file in the read-only image is unchanged. Since images don't change, images do not have state. 
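+
+A small sketch makes this concrete (the file name is arbitrary): a file written
+in one container lives only in that container's read-write layer, so a second
+container started from the same image does not see it:
+
+    $ sudo docker run ubuntu touch /only-in-this-container
+    $ sudo docker run ubuntu ls /only-in-this-container
+    ls: cannot access /only-in-this-container: No such file or directory
+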
-![](../../_images/docker-filesystems-debianrw.png) +![](../../static_files/docker-filesystems-debianrw.png) ## Parent Image -![](../../_images/docker-filesystems-multilayer.png) +![](../../static_files/docker-filesystems-multilayer.png) Each image may depend on one more image which forms the layer beneath it. We sometimes say that the lower image is the **parent** of the upper diff --git a/docs/sources/terms/layer.md b/docs/sources/terms/layer.md index 7665467aae..39c71fa4b6 100644 --- a/docs/sources/terms/layer.md +++ b/docs/sources/terms/layer.md @@ -20,7 +20,7 @@ file system *over* the read-only file system. In fact there may be multiple read-only file systems stacked on top of each other. We think of each one of these file systems as a **layer**. -![](../../_images/docker-filesystems-multilayer.png) +![](../../static_files/docker-filesystems-multilayer.png) At first, the top read-write layer has nothing in it, but any time a process creates a file, this happens in the top layer. And if something diff --git a/docs/sources/terms/registry.md b/docs/sources/terms/registry.md index 0d5af2c65d..bb3209ebac 100644 --- a/docs/sources/terms/registry.md +++ b/docs/sources/terms/registry.md @@ -6,9 +6,9 @@ page_keywords: containers, lxc, concepts, explanation, image, repository, contai ## Introduction -A Registry is a hosted service containing -[*repositories*](../repository/#repository-def) of -[*images*](../image/#image-def) which responds to the Registry API. +A Registry is a hosted service containing [*repositories*]( +../repository/#repository-def) of [*images*](../image/#image-def) which +responds to the Registry API. The default registry can be accessed using a browser at [http://images.docker.io](http://images.docker.io) or using the @@ -16,5 +16,5 @@ The default registry can be accessed using a browser at ## Further Reading -For more information see [*Working with -Repositories*](../../use/workingwithrepository/#working-with-the-repository) +For more information see [*Working with Repositories*]( +../use/workingwithrepository/#working-with-the-repository) diff --git a/docs/sources/terms/repository.md b/docs/sources/terms/repository.md index 7ccd69ad19..52760ac20d 100644 --- a/docs/sources/terms/repository.md +++ b/docs/sources/terms/repository.md @@ -13,26 +13,23 @@ server. Images can be associated with a repository (or multiple) by giving them an image name using one of three different commands: -1. At build time (e.g. `sudo docker build -t IMAGENAME` -), -2. When committing a container (e.g. - `sudo docker commit CONTAINERID IMAGENAME`) or -3. When tagging an image id with an image name (e.g. - `sudo docker tag IMAGEID IMAGENAME`). +1. At build time (e.g. `sudo docker build -t IMAGENAME`), +2. When committing a container (e.g. + `sudo docker commit CONTAINERID IMAGENAME`) or +3. When tagging an image id with an image name (e.g. + `sudo docker tag IMAGEID IMAGENAME`). A Fully Qualified Image Name (FQIN) can be made up of 3 parts: `[registry_hostname[:port]/][user_name/](repository_name:version_tag)` -`username` and `registry_hostname` -default to an empty string. When `registry_hostname` -is an empty string, then `docker push` -will push to `index.docker.io:80`. +`username` and `registry_hostname` default to an empty string. When +`registry_hostname` is an empty string, then `docker push` will push to +`index.docker.io:80`. 
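+
+For example (the registry host, user name, and tag here are only illustrative),
+tagging and pushing an image under a fully qualified name might look like:
+
+    $ sudo docker tag IMAGEID registry.example.com:5000/myname/myapp:v1
+    $ sudo docker push registry.example.com:5000/myname/myapp
+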
If you create a new repository which you want to share, you will need to -set at least the `user_name`, as the ‘default’ blank -`user_name` prefix is reserved for official Docker -images. +set at least the `user_name`, as the `default` blank `user_name` prefix is +reserved for official Docker images. -For more information see [*Working with -Repositories*](../../use/workingwithrepository/#working-with-the-repository) +For more information see [*Working with Repositories*]( +../use/workingwithrepository/#working-with-the-repository) diff --git a/docs/sources/toctree.md b/docs/sources/toctree.md index e837c7e3af..ec1832fc21 100644 --- a/docs/sources/toctree.md +++ b/docs/sources/toctree.md @@ -6,12 +6,12 @@ page_keywords: todo, docker, documentation, installation, usage, examples, contr This documentation has the following resources: -- [Installation](../installation/) -- [Use](../use/) -- [Examples](../examples/) -- [Reference Manual](../reference/) -- [Contributing](../contributing/) -- [Glossary](../terms/) -- [Articles](../articles/) -- [FAQ](../faq/) + - [Installation](../installation/) + - [Use](../use/) + - [Examples](../examples/) + - [Reference Manual](../reference/) + - [Contributing](../contributing/) + - [Glossary](../terms/) + - [Articles](../articles/) + - [FAQ](../faq/) diff --git a/docs/sources/use.md b/docs/sources/use.md index ce4a51025c..5b2524361e 100644 --- a/docs/sources/use.md +++ b/docs/sources/use.md @@ -2,12 +2,12 @@ ## Contents: -- [First steps with Docker](basics/) -- [Share Images via Repositories](workingwithrepository/) -- [Redirect Ports](port_redirection/) -- [Configure Networking](networking/) -- [Automatically Start Containers](host_integration/) -- [Share Directories via Volumes](working_with_volumes/) -- [Link Containers](working_with_links_names/) -- [Link via an Ambassador Container](ambassador_pattern_linking/) -- [Using Puppet](puppet/) \ No newline at end of file + - [First steps with Docker](basics/) + - [Share Images via Repositories](workingwithrepository/) + - [Redirect Ports](port_redirection/) + - [Configure Networking](networking/) + - [Automatically Start Containers](host_integration/) + - [Share Directories via Volumes](working_with_volumes/) + - [Link Containers](working_with_links_names/) + - [Link via an Ambassador Container](ambassador_pattern_linking/) + - [Using Puppet](puppet/) \ No newline at end of file diff --git a/docs/sources/use/ambassador_pattern_linking.md b/docs/sources/use/ambassador_pattern_linking.md index 685d155917..a04dbdffc0 100644 --- a/docs/sources/use/ambassador_pattern_linking.md +++ b/docs/sources/use/ambassador_pattern_linking.md @@ -62,8 +62,7 @@ linking to the local redis ambassador. ## How it works The following example shows what the `svendowideit/ambassador` -container does automatically (with a tiny amount of -`sed`) +container does automatically (with a tiny amount of `sed`) On the docker host (192.168.1.52) that redis will run on: @@ -82,8 +81,8 @@ On the docker host (192.168.1.52) that redis will run on: # add redis ambassador $ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 busybox sh -in the redis\_ambassador container, you can see the linked redis -containers’s env +in the redis_ambassador container, you can see the linked redis +containers'senv $ env REDIS_PORT=tcp://172.17.0.136:6379 diff --git a/docs/sources/use/basics.md b/docs/sources/use/basics.md index e283a9dec8..2d1bf34f96 100644 --- a/docs/sources/use/basics.md +++ b/docs/sources/use/basics.md @@ -37,7 +37,7 @@ cache. 
> characters of the full image ID - which can be found using > `docker inspect` or `docker images --no-trunc=true` -**If you’re using OS X** then you shouldn’t use `sudo`. +**If you're using OS X** then you shouldn't use `sudo`. ## Running an interactive shell @@ -75,9 +75,9 @@ following format: `tcp://[host][:port]` or For example: -- `tcp://host:4243` -\> tcp connection on +- `tcp://host:4243` -> tcp connection on host:4243 -- `unix://path/to/socket` -\> unix socket located +- `unix://path/to/socket` -> unix socket located at `path/to/socket` `-H`, when empty, will default to the same value as @@ -170,7 +170,6 @@ will be stored (as a diff). See which images you already have using the You now have a image state from which you can create new instances. -Read more about [*Share Images via -Repositories*](../workingwithrepository/#working-with-the-repository) or -continue to the complete [*Command -Line*](../../reference/commandline/cli/#cli) +Read more about [*Share Images via Repositories*]( +../workingwithrepository/#working-with-the-repository) or +continue to the complete [*Command Line*](../../reference/commandline/cli/#cli) diff --git a/docs/sources/use/chef.md b/docs/sources/use/chef.md index b35391dca5..476b2919d0 100644 --- a/docs/sources/use/chef.md +++ b/docs/sources/use/chef.md @@ -6,13 +6,13 @@ page_keywords: chef, installation, usage, docker, documentation > **Note**: > Please note this is a community contributed installation path. The only -> ‘official’ installation is using the +> `official` installation is using the > [*Ubuntu*](../../installation/ubuntulinux/#ubuntu-linux) installation > path. This version may sometimes be out of date. ## Requirements -To use this guide you’ll need a working installation of +To use this guide you'll need a working installation of [Chef](http://www.getchef.com/). This cookbook supports a variety of operating systems. diff --git a/docs/sources/use/host_integration.md b/docs/sources/use/host_integration.md index 0aa0dc8314..370c00e20a 100644 --- a/docs/sources/use/host_integration.md +++ b/docs/sources/use/host_integration.md @@ -5,8 +5,7 @@ page_keywords: systemd, upstart, supervisor, docker, documentation, host integra # Automatically Start Containers You can use your Docker containers with process managers like -`upstart`, `systemd` and -`supervisor`. +`upstart`, `systemd` and `supervisor`. ## Introduction @@ -27,7 +26,7 @@ docker. ## Sample Upstart Script -In this example we’ve already created a container to run Redis with +In this example We've already created a container to run Redis with `--name redis_server`. To create an upstart script for our container, we create a file named `/etc/init/redis.conf` and place the following into @@ -42,7 +41,7 @@ it: /usr/bin/docker start -a redis_server end script -Next, we have to configure docker so that it’s run with the option +Next, we have to configure docker so that it's run with the option `-r=false`. Run the following command: $ sudo sh -c "echo 'DOCKER_OPTS=\"-r=false\"' > /etc/default/docker" diff --git a/docs/sources/use/networking.md b/docs/sources/use/networking.md index 3dfca0cb94..2249ca42cd 100644 --- a/docs/sources/use/networking.md +++ b/docs/sources/use/networking.md @@ -10,10 +10,10 @@ Docker uses Linux bridge capabilities to provide network connectivity to containers. The `docker0` bridge interface is managed by Docker for this purpose. 
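On a typical installation the bridge can be examined directly. A quick sketch (interface names, addresses and the attached `veth` interfaces will differ from host to host):

    # show the docker0 bridge and the address Docker assigned to it
    $ ip addr show docker0

    # list the veth interfaces of running containers attached to the bridge
    $ brctl show docker0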
When the Docker daemon starts it : -- creates the `docker0` bridge if not present -- searches for an IP address range which doesn’t overlap with an existing route -- picks an IP in the selected range -- assigns this IP to the `docker0` bridge + - creates the `docker0` bridge if not present + - searches for an IP address range which doesn't overlap with an existing route + - picks an IP in the selected range + - assigns this IP to the `docker0` bridge @@ -47,7 +47,7 @@ is dedicated to the 52f811c5d3d6 container. ## How to use a specific IP address range Docker will try hard to find an IP range that is not used by the host. -Even though it works for most cases, it’s not bullet-proof and sometimes +Even though it works for most cases, it's not bullet-proof and sometimes you need to have more control over the IP addressing scheme. For this purpose, Docker allows you to manage the `docker0` @@ -56,10 +56,10 @@ parameter. In this scenario: -- ensure Docker is stopped -- create your own bridge (`bridge0` for example) -- assign a specific IP to this bridge -- start Docker with the `-b=bridge0` parameter + - ensure Docker is stopped + - create your own bridge (`bridge0` for example) + - assign a specific IP to this bridge + - start Docker with the `-b=bridge0` parameter @@ -107,14 +107,12 @@ In this scenario: ## Container intercommunication -The value of the Docker daemon’s `icc` parameter +The value of the Docker daemon's `icc` parameter determines whether containers can communicate with each other over the bridge network. -- The default, `-icc=true` allows containers to - communicate with each other. -- `-icc=false` means containers are isolated from - each other. + - The default, `-icc=true` allows containers to communicate with each other. + - `-icc=false` means containers are isolated from each other. Docker uses `iptables` under the hood to either accept or drop communication between containers. @@ -125,7 +123,7 @@ Well. Things get complicated here. The `vethXXXX` interface is the host side of a point-to-point link between the host and the corresponding container; -the other side of the link is the container’s `eth0` +the other side of the link is the container's `eth0` interface. This pair (host `vethXXX` and container `eth0`) are connected like a tube. Everything that comes in one side will come out the other side. @@ -135,6 +133,6 @@ ip link command) and the namespaces infrastructure. ## I want more -Jérôme Petazzoni has create `pipework` to connect -together containers in arbitrarily complex scenarios : +Jérôme Petazzoni has create `pipework` to connect together containers in +arbitrarily complex scenarios: [https://github.com/jpetazzo/pipework](https://github.com/jpetazzo/pipework) diff --git a/docs/sources/use/port_redirection.md b/docs/sources/use/port_redirection.md index a85234f48f..ef0e644ace 100644 --- a/docs/sources/use/port_redirection.md +++ b/docs/sources/use/port_redirection.md @@ -31,22 +31,19 @@ containers, Docker provides the linking mechanism. To bind all the exposed container ports to the host automatically, use `docker run -P `. The mapped host ports will be auto-selected from a pool of unused ports (49000..49900), and -you will need to use `docker ps`, -`docker inspect ` or -`docker port ` to determine -what they are. +you will need to use `docker ps`, `docker inspect ` or +`docker port ` to determine what they are. 
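A short sketch of that workflow (the image and container names are examples only; the auto-selected host port will differ):

    # publish every exposed port to an automatically chosen host port
    $ sudo docker run -d -P --name port_test crosbymichael/redis

    # find out which host interface and port were bound to the container's port 6379
    $ sudo docker port port_test 6379

    # the full mapping is also visible in the PORTS column
    $ sudo docker ps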
## Binding a port to a host interface To bind a port of the container to a specific interface of the host -system, use the `-p` parameter of the -`docker run` command: +system, use the `-p` parameter of the `docker run` command: # General syntax docker run -p [([:[host_port]])|():][/udp] When no host interface is provided, the port is bound to all available -interfaces of the host machine (aka INADDR\_ANY, or 0.0.0.0).When no +interfaces of the host machine (aka INADDR_ANY, or 0.0.0.0). When no host port is provided, one is dynamically allocated. The possible combinations of options for TCP port are the following: @@ -68,9 +65,9 @@ combinations described for TCP work. Here is only one example: # Bind UDP port 5353 of the container to UDP port 53 on 127.0.0.1 of the host machine. docker run -p 127.0.0.1:53:5353/udp -The command `docker port` lists the interface and -port on the host machine bound to a given container port. It is useful -when using dynamically allocated ports: +The command `docker port` lists the interface and port on the host machine +bound to a given container port. It is useful when using dynamically allocated +ports: # Bind to a dynamically allocated port docker run -p 127.0.0.1::8080 --name dyn-bound @@ -84,29 +81,22 @@ when using dynamically allocated ports: Communication between two containers can also be established in a docker-specific way called linking. -To briefly present the concept of linking, let us consider two -containers: `server`, containing the service, and -`client`, accessing the service. Once -`server` is running, `client` is -started and links to server. Linking sets environment variables in -`client` giving it some information about -`server`. In this sense, linking is a method of -service discovery. +To briefly present the concept of linking, let us consider two containers: +`server`, containing the service, and `client`, accessing the service. Once +`server` is running, `client` is started and links to server. Linking sets +environment variables in `client` giving it some information about `server`. +In this sense, linking is a method of service discovery. -Let us now get back to our topic of interest; communication between the -two containers. We mentioned that the tricky part about this -communication was that the IP address of `server` -was not fixed. Therefore, some of the environment variables are going to -be used to inform `client` about this IP address. -This process called exposure, is possible because `client` -is started after `server` has been -started. +Let us now get back to our topic of interest; communication between the two +containers. We mentioned that the tricky part about this communication was that +the IP address of `server` was not fixed. Therefore, some of the environment +variables are going to be used to inform `client` about this IP address. This +process called exposure, is possible because `client` is started after `server` +has been started. -Here is a full example. On `server`, the port of -interest is exposed. The exposure is done either through the -`--expose` parameter to the `docker run` -command, or the `EXPOSE` build command in -a Dockerfile: +Here is a full example. On `server`, the port of interest is exposed. 
The +exposure is done either through the `--expose` parameter to the `docker run` +command, or the `EXPOSE` build command in a Dockerfile: # Expose port 80 docker run --expose 80 --name server @@ -116,8 +106,7 @@ The `client` then links to the `server`: # Link docker run --name client --link server:linked-server -`client` locally refers to `server` -as `linked-server`. The following +`client` locally refers to `server` as `linked-server`. The following environment variables, among others, are available on `client`: # The default protocol, ip, and port of the service running in the container @@ -129,9 +118,7 @@ environment variables, among others, are available on `client`: LINKED-SERVER_PORT_80_TCP_ADDR=172.17.0.8 LINKED-SERVER_PORT_80_TCP_PORT=80 -This tells `client` that a service is running on -port 80 of `server` and that `server` -is accessible at the IP address 172.17.0.8 +This tells `client` that a service is running on port 80 of `server` and that +`server` is accessible at the IP address 172.17.0.8 -Note: Using the `-p` parameter also exposes the -port.. +Note: Using the `-p` parameter also exposes the port. diff --git a/docs/sources/use/puppet.md b/docs/sources/use/puppet.md index 55f16dd5bc..026b1defb1 100644 --- a/docs/sources/use/puppet.md +++ b/docs/sources/use/puppet.md @@ -4,15 +4,15 @@ page_keywords: puppet, installation, usage, docker, documentation # Using Puppet -> *Note:* Please note this is a community contributed installation path. The only -> ‘official’ installation is using the +> *Note:* Please note this is a community contributed installation path. The +> only `official` installation is using the > [*Ubuntu*](../../installation/ubuntulinux/#ubuntu-linux) installation > path. This version may sometimes be out of date. ## Requirements -To use this guide you’ll need a working installation of Puppet from -[Puppetlabs](https://www.puppetlabs.com) . +To use this guide you'll need a working installation of Puppet from +[Puppetlabs](https://puppetlabs.com) . The module also currently uses the official PPA so only works with Ubuntu. @@ -26,7 +26,7 @@ installed using the built-in module tool. puppet module install garethr/docker It can also be found on -[GitHub](https://www.github.com/garethr/garethr-docker) if you would +[GitHub](https://github.com/garethr/garethr-docker) if you would rather download the source. ## Usage diff --git a/docs/sources/use/working_with_links_names.md b/docs/sources/use/working_with_links_names.md index 67ca8004f1..40260feabf 100644 --- a/docs/sources/use/working_with_links_names.md +++ b/docs/sources/use/working_with_links_names.md @@ -6,19 +6,18 @@ page_keywords: Examples, Usage, links, linking, docker, documentation, examples, ## Introduction -From version 0.6.5 you are now able to `name` a -container and `link` it to another container by -referring to its name. This will create a parent -\> child relationship -where the parent container can see selected information about its child. +From version 0.6.5 you are now able to `name` a container and `link` it to +another container by referring to its name. This will create a parent -> child +relationship where the parent container can see selected information about its +child. ## Container Naming New in version v0.6.5. -You can now name your container by using the `--name` -flag. If no name is provided, Docker will automatically -generate a name. You can see this name using the `docker ps` -command. +You can now name your container by using the `--name` flag. 
If no name is +provided, Docker will automatically generate a name. You can see this name +using the `docker ps` command. # format is "sudo docker run --name " $ sudo docker run --name test ubuntu /bin/bash @@ -33,52 +32,45 @@ command. New in version v0.6.5. Links allow containers to discover and securely communicate with each -other by using the flag `-link name:alias`. -Inter-container communication can be disabled with the daemon flag -`-icc=false`. With this flag set to -`false`, Container A cannot access Container B -unless explicitly allowed via a link. This is a huge win for securing -your containers. When two containers are linked together Docker creates -a parent child relationship between the containers. The parent container -will be able to access information via environment variables of the -child such as name, exposed ports, IP and other selected environment -variables. +other by using the flag `-link name:alias`. Inter-container communication +can be disabled with the daemon flag `-icc=false`. With this flag set to +`false`, Container A cannot access Container unless explicitly allowed via +a link. This is a huge win for securing your containers. When two containers +are linked together Docker creates a parent child relationship between the +containers. The parent container will be able to access information via +environment variables of the child such as name, exposed ports, IP and other +selected environment variables. -When linking two containers Docker will use the exposed ports of the -container to create a secure tunnel for the parent to access. If a -database container only exposes port 8080 then the linked container will -only be allowed to access port 8080 and nothing else if inter-container -communication is set to false. +When linking two containers Docker will use the exposed ports of the container +to create a secure tunnel for the parent to access. If a database container +only exposes port 8080 then the linked container will only be allowed to access +port 8080 and nothing else if inter-container communication is set to false. -For example, there is an image called `crosbymichael/redis` -that exposes the port 6379 and starts the Redis server. Let’s -name the container as `redis` based on that image -and run it as daemon. +For example, there is an image called `crosbymichael/redis` that exposes the +port 6379 and starts the Redis server. Let's name the container as `redis` +based on that image and run it as daemon. $ sudo docker run -d -name redis crosbymichael/redis -We can issue all the commands that you would expect using the name -`redis`; start, stop, attach, using the name for our -container. The name also allows us to link other containers into this -one. +We can issue all the commands that you would expect using the name `redis`; +start, stop, attach, using the name for our container. The name also allows +us to link other containers into this one. -Next, we can start a new web application that has a dependency on Redis -and apply a link to connect both containers. If you noticed when running -our Redis server we did not use the `-p` flag to -publish the Redis port to the host system. Redis exposed port 6379 and -this is all we need to establish a link. +Next, we can start a new web application that has a dependency on Redis and +apply a link to connect both containers. If you noticed when running our Redis +server we did not use the `-p` flag to publish the Redis port to the host +system. 
Redis exposed port 6379 and this is all we need to establish a link. $ sudo docker run -t -i -link redis:db -name webapp ubuntu bash -When you specified `-link redis:db` you are telling -Docker to link the container named `redis` into this -new container with the alias `db`. Environment -variables are prefixed with the alias so that the parent container can -access network and environment information from the containers that are +When you specified `-link redis:db` you are telling Docker to link the +container named `redis` into this new container with the alias `db`. +Environment variables are prefixed with the alias so that the parent container +can access network and environment information from the containers that are linked into it. -If we inspect the environment variables of the second container, we -would see all the information about the child container. +If we inspect the environment variables of the second container, we would see +all the information about the child container. $ root@4c01db0b339c:/# env @@ -98,20 +90,20 @@ would see all the information about the child container. _=/usr/bin/env root@4c01db0b339c:/# -Accessing the network information along with the environment of the -child container allows us to easily connect to the Redis service on the -specific IP and port in the environment. +Accessing the network information along with the environment of the child +container allows us to easily connect to the Redis service on the specific +IP and port in the environment. > **Note**: > These Environment variables are only set for the first process in the > container. Similarly, some daemons (such as `sshd`) > will scrub them when spawning shells for connection. -You can work around this by storing the initial `env` -in a file, or looking at `/proc/1/environ`. +You can work around this by storing the initial `env` in a file, or looking +at `/proc/1/environ`. -Running `docker ps` shows the 2 containers, and the -`webapp/db` alias name for the Redis container. +Running `docker ps` shows the 2 containers, and the `webapp/db` alias name for +the Redis container. $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES diff --git a/docs/sources/use/working_with_volumes.md b/docs/sources/use/working_with_volumes.md index e95d3786b1..4712a9fbff 100644 --- a/docs/sources/use/working_with_volumes.md +++ b/docs/sources/use/working_with_volumes.md @@ -11,20 +11,20 @@ containers that bypasses the [*Union File System*](../../terms/layer/#ufs-def) to provide several useful features for persistent or shared data: -- **Data volumes can be shared and reused between containers:** - This is the feature that makes data volumes so powerful. You can - use it for anything from hot database upgrades to custom backup or - replication tools. See the example below. -- **Changes to a data volume are made directly:** - Without the overhead of a copy-on-write mechanism. This is good for - very large files. -- **Changes to a data volume will not be included at the next commit:** - Because they are not recorded as regular filesystem changes in the - top layer of the [*Union File System*](../../terms/layer/#ufs-def) -- **Volumes persist until no containers use them:** - As they are a reference counted resource. The container does not need to be - running to share its volumes, but running it can help protect it - against accidental removal via `docker rm`. + - **Data volumes can be shared and reused between containers:** + This is the feature that makes data volumes so powerful. 
You can + use it for anything from hot database upgrades to custom backup or + replication tools. See the example below. + - **Changes to a data volume are made directly:** + Without the overhead of a copy-on-write mechanism. This is good for + very large files. + - **Changes to a data volume will not be included at the next commit:** + Because they are not recorded as regular filesystem changes in the + top layer of the [*Union File System*](../../terms/layer/#ufs-def) + - **Volumes persist until no containers use them:** + As they are a reference counted resource. The container does not need to be + running to share its volumes, but running it can help protect it + against accidental removal via `docker rm`. Each container can have zero or more data volumes. @@ -82,8 +82,8 @@ Interestingly, you can mount the volumes that came from the $ docker run -t -i -rm -volumes-from client1 -name client2 ubuntu bash This allows you to abstract the actual data source from users of that -data, similar to -[*ambassador\_pattern\_linking*](../ambassador_pattern_linking/#ambassador-pattern-linking). +data, similar to [*Ambassador Pattern Linking*]( +../ambassador_pattern_linking/#ambassador-pattern-linking). If you remove containers that mount volumes, including the initial DATA container, or the middleman, the volumes will not be deleted until there @@ -117,40 +117,34 @@ New in version v0.5.0. ### Note for OS/X users and remote daemon users: -OS/X users run `boot2docker` to create a minimalist -virtual machine running the docker daemon. That virtual machine then -launches docker commands on behalf of the OS/X command line. The means -that `host directories` refer to directories in the -`boot2docker` virtual machine, not the OS/X -filesystem. +OS/X users run `boot2docker` to create a minimalist virtual machine running +the docker daemon. That virtual machine then launches docker commands on +behalf of the OS/X command line. The means that `host directories` refer to +directories in the `boot2docker` virtual machine, not the OS/X filesystem. -Similarly, anytime when the docker daemon is on a remote machine, the -`host directories` always refer to directories on -the daemon’s machine. +Similarly, anytime when the docker daemon is on a remote machine, the +`host directories` always refer to directories on the daemon's machine. ### Backup, restore, or migrate data volumes -You cannot back up volumes using `docker export`, -`docker save` and `docker cp` -because they are external to images. Instead you can use -`--volumes-from` to start a new container that can -access the data-container’s volume. For example: +You cannot back up volumes using `docker export`, `docker save` and `docker cp` +because they are external to images. Instead you can use `--volumes-from` to +start a new container that can access the data-container's volume. 
For example: $ sudo docker run -rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data -- `-rm` - remove the container when it exits -- `--volumes-from DATA` - attach to the volumes - shared by the `DATA` container -- `-v $(pwd):/backup` - bind mount the current - directory into the container; to write the tar file to -- `busybox` - a small simpler image - good for - quick maintenance -- `tar cvf /backup/backup.tar /data` - creates an - uncompressed tar file of all the files in the `/data` - directory + - `-rm`: + remove the container when it exits + - `--volumes-from DATA`: + attach to the volumes shared by the `DATA` container + - `-v $(pwd):/backup`: + bind mount the current directory into the container; to write the tar file to + - `busybox`: + a small simpler image - good for quick maintenance + - `tar cvf /backup/backup.tar /data`: + creates an uncompressed tar file of all the files in the `/data` directory -Then to restore to the same container, or another that you’ve made -elsewhere: +Then to restore to the same container, or another that you`ve made elsewhere: # create a new data container $ sudo docker run -v /data -name DATA2 busybox true @@ -167,12 +161,11 @@ restore testing using your preferred tools. ## Known Issues -- [Issue 2702](https://github.com/dotcloud/docker/issues/2702): + - [Issue 2702](https://github.com/dotcloud/docker/issues/2702): "lxc-start: Permission denied - failed to mount" could indicate a permissions problem with AppArmor. Please see the issue for a workaround. -- [Issue 2528](https://github.com/dotcloud/docker/issues/2528): the + - [Issue 2528](https://github.com/dotcloud/docker/issues/2528): the busybox container is used to make the resulting container as small and simple as possible - whenever you need to interact with the data in the volume you mount it into another container. - diff --git a/docs/sources/use/workingwithrepository.md b/docs/sources/use/workingwithrepository.md index c71aa60e10..38eda476ed 100644 --- a/docs/sources/use/workingwithrepository.md +++ b/docs/sources/use/workingwithrepository.md @@ -8,7 +8,7 @@ page_keywords: repo, repositories, usage, pull image, push image, image, documen A *repository* is a shareable collection of tagged [*images*](../../terms/image/#image-def) that together create the file -systems for containers. The repository’s name is a label that indicates +systems for containers. The repository's name is a label that indicates the provenance of the repository, i.e. who created it and where the original copy is located. @@ -44,17 +44,15 @@ they really help people get started quickly! You could also use control of who accesses your images, but we will only refer to public repositories in these examples. -- Top-level repositories can easily be recognized by **not** having a - `/` (slash) in their name. These repositories - can generally be trusted. -- User repositories always come in the form of - `/`. This is what your - published images will look like if you push to the public Central - Registry. -- Only the authenticated user can push to their *username* namespace - on the Central Registry. -- User images are not checked, it is therefore up to you whether or - not you trust the creator of this image. +- Top-level repositories can easily be recognized by **not** having a + `/` (slash) in their name. These repositories can generally be trusted. +- User repositories always come in the form of `/`. 
+ This is what your published images will look like if you push to the public + Central Registry. +- Only the authenticated user can push to their *username* namespace + on the Central Registry. +- User images are not checked, it is therefore up to you whether or not you + trust the creator of this image. ## Find Public Images on the Central Index @@ -79,9 +77,9 @@ There you can see two example results: `centos` and `slantview/centos-chef-solo`. The second result shows that it comes from the public repository of a user, `slantview/`, while the first result -(`centos`) doesn’t explicitly list a repository so +(`centos`) doesn't explicitly list a repository so it comes from the trusted Central Repository. The `/` -character separates a user’s repository and the image name. +character separates a user's repository and the image name. Once you have found the image name, you can download it: @@ -91,7 +89,7 @@ Once you have found the image name, you can download it: 539c0211cd76: Download complete What can you do with that image? Check out the -[*Examples*](../../examples/#example-list) and, when you’re ready with +[*Examples*](../../examples/#example-list) and, when you're ready with your own image, come back here to learn how to share it. ## Contributing to the Central Registry @@ -109,13 +107,13 @@ namespace for your public repositories. If your username is available then `docker` will also prompt you to enter a password and your e-mail address. It will -then automatically log you in. Now you’re ready to commit and push your +then automatically log you in. Now you're ready to commit and push your own images! ## Committing a Container to a Named Image When you make changes to an existing image, those changes get saved to a -container’s file system. You can then promote that container to become +container's file system. You can then promote that container to become an image by making a `commit`. In addition to converting the container to an image, this is also your opportunity to name the image, specifically a name that includes your user name from @@ -146,17 +144,13 @@ when you push a commit. ### To setup a trusted build 1. Create a [Docker Index account](https://index.docker.io/) and login. -2. Link your GitHub account through the `Link Accounts` - menu. +2. Link your GitHub account through the `Link Accounts` menu. 3. [Configure a Trusted build](https://index.docker.io/builds/). -4. Pick a GitHub project that has a `Dockerfile` - that you want to build. -5. Pick the branch you want to build (the default is the - `master` branch). +4. Pick a GitHub project that has a `Dockerfile` that you want to build. +5. Pick the branch you want to build (the default is the `master` branch). 6. Give the Trusted Build a name. 7. Assign an optional Docker tag to the Build. -8. Specify where the `Dockerfile` is located. The - default is `/`. +8. Specify where the `Dockerfile` is located. The default is `/`. Once the Trusted Build is configured it will automatically trigger a build, and in a few minutes, if there are no errors, you will see your @@ -168,22 +162,20 @@ If you want to see the status of your Trusted Builds you can go to your index, and it will show you the status of your builds, and the build history. -Once you’ve created a Trusted Build you can deactivate or delete it. You -cannot however push to a Trusted Build with the `docker push` -command. You can only manage it by committing code to your -GitHub repository. +Once you`ve created a Trusted Build you can deactivate or delete it. 
You +cannot however push to a Trusted Build with the `docker push` command. +You can only manage it by committing code to your GitHub repository. You can create multiple Trusted Builds per repository and configure them -to point to specific `Dockerfile`‘s or Git branches. +to point to specific Dockerfile's or Git branches. ## Private Registry Private registries and private shared repositories are only possible by -hosting [your own -registry](https://github.com/dotcloud/docker-registry). To push or pull -to a repository on your own registry, you must prefix the tag with the -address of the registry’s host (a `.` or -`:` is used to identify a host), like this: +hosting [your own registry](https://github.com/dotcloud/docker-registry). +To push or pull to a repository on your own registry, you must prefix the +tag with the address of the registry's host (a `.` or `:` is used to identify +a host), like this: # Tag to create a repository with the full registry location. # The location (e.g. localhost.localdomain:5000) becomes @@ -193,7 +185,7 @@ address of the registry’s host (a `.` or # Push the new repository to its home location on localhost sudo docker push localhost.localdomain:5000/repo_name -Once a repository has your registry’s host name as part of the tag, you +Once a repository has your registry's host name as part of the tag, you can push and pull it like any other repository, but it will **not** be searchable (or indexed at all) in the Central Index, and there will be no user name checking performed. Your registry will function completely @@ -203,8 +195,8 @@ independently from the Central Index. See also -[Docker Blog: How to use your own -registry](http://blog.docker.io/2013/07/how-to-use-your-own-registry/) +[Docker Blog: How to use your own registry]( +http://blog.docker.io/2013/07/how-to-use-your-own-registry/) ## Authentication File @@ -212,11 +204,11 @@ The authentication is stored in a json file, `.dockercfg` located in your home directory. It supports multiple registry urls. -`docker login` will create the -"[https://index.docker.io/v1/](https://index.docker.io/v1/)" key. +`docker login` will create the "[https://index.docker.io/v1/]( +https://index.docker.io/v1/)" key. -`docker login https://my-registry.com` will create -the "[https://my-registry.com](https://my-registry.com)" key. +`docker login https://my-registry.com` will create the +"[https://my-registry.com](https://my-registry.com)" key. For example: From ada86fc5b736d8b3209429c584384fd9974a148a Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 24 Apr 2014 22:12:21 +1000 Subject: [PATCH 025/219] Looking into some broken links, I noticed that we don't need to use relative paths, and also fixed some broken images. 
There are still more todo - next PR I think :) Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/mkdocs.yml | 17 ++++++++-------- docs/sources/articles/baseimages.md | 2 +- docs/sources/articles/security.md | 2 +- docs/sources/examples/apt-cacher-ng.md | 2 +- docs/sources/examples/couchdb_data_volumes.md | 2 +- docs/sources/examples/hello_world.md | 6 +++--- docs/sources/examples/mongodb.md | 2 +- docs/sources/examples/nodejs_web_app.md | 2 +- docs/sources/examples/postgresql_service.md | 4 ++-- docs/sources/examples/python_web_app.md | 2 +- .../sources/examples/running_redis_service.md | 2 +- docs/sources/examples/running_riak_service.md | 2 +- docs/sources/examples/running_ssh_service.md | 2 +- docs/sources/examples/using_supervisord.md | 2 +- docs/sources/installation/amazon.md | 6 +++--- docs/sources/installation/binaries.md | 4 ++-- docs/sources/installation/fedora.md | 2 +- docs/sources/installation/mac.md | 2 +- docs/sources/installation/openSUSE.md | 2 +- docs/sources/installation/rhel.md | 2 +- docs/sources/installation/softlayer.md | 2 +- docs/sources/installation/ubuntulinux.md | 6 +++--- .../api/archive/docker_remote_api_v1.6.md | 4 ++-- .../api/archive/docker_remote_api_v1.7.md | 6 +++--- .../api/archive/docker_remote_api_v1.8.md | 6 +++--- .../reference/api/docker_io_oauth_api.md | 2 +- .../reference/api/docker_remote_api_v1.10.md | 4 ++-- .../reference/api/docker_remote_api_v1.11.md | 4 ++-- .../reference/api/docker_remote_api_v1.9.md | 4 ++-- .../reference/api/registry_index_spec.md | 4 ++-- docs/sources/reference/builder.md | 14 ++++++------- docs/sources/reference/commandline/cli.md | 20 +++++++++---------- docs/sources/reference/run.md | 14 ++++++------- docs/sources/terms/container.md | 2 +- docs/sources/terms/filesystem.md | 4 ++-- docs/sources/terms/image.md | 6 +++--- docs/sources/terms/layer.md | 2 +- docs/sources/use/basics.md | 4 ++-- docs/sources/use/chef.md | 2 +- docs/sources/use/puppet.md | 2 +- docs/sources/use/working_with_volumes.md | 4 ++-- docs/sources/use/workingwithrepository.md | 6 +++--- 42 files changed, 95 insertions(+), 94 deletions(-) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 38cec6ac14..3538642717 100755 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -116,11 +116,12 @@ pages: - ['jsearch.md', '**HIDDEN**'] # - ['static_files/README.md', 'static_files', 'README'] -#- ['terms/index.md', '**HIDDEN**'] -# - ['terms/layer.md', 'terms', 'layer'] -# - ['terms/index.md', 'terms', 'Home'] -# - ['terms/registry.md', 'terms', 'registry'] -# - ['terms/container.md', 'terms', 'container'] -# - ['terms/repository.md', 'terms', 'repository'] -# - ['terms/filesystem.md', 'terms', 'filesystem'] -# - ['terms/image.md', 'terms', 'image'] +- ['terms/index.md', '**HIDDEN**'] +- ['terms/layer.md', '**HIDDEN**', 'layer'] +- ['terms/index.md', '**HIDDEN**', 'Home'] +- ['terms/registry.md', '**HIDDEN**', 'registry'] +- ['terms/container.md', '**HIDDEN**', 'container'] +- ['terms/repository.md', '**HIDDEN**', 'repository'] +- ['terms/filesystem.md', '**HIDDEN**', 'filesystem'] +- ['terms/image.md', '**HIDDEN**', 'image'] + diff --git a/docs/sources/articles/baseimages.md b/docs/sources/articles/baseimages.md index 3754bab6aa..c795b7a0a7 100644 --- a/docs/sources/articles/baseimages.md +++ b/docs/sources/articles/baseimages.md @@ -5,7 +5,7 @@ page_keywords: Examples, Usage, base image, docker, documentation, examples # Create a Base Image So you want to create your own [*Base Image*]( -../../terms/image/#base-image-def)? Great! 
+/terms/image/#base-image-def)? Great! The specific process will depend heavily on the Linux distribution you want to package. We have some examples below, and you are encouraged to diff --git a/docs/sources/articles/security.md b/docs/sources/articles/security.md index 4519248015..69284db836 100644 --- a/docs/sources/articles/security.md +++ b/docs/sources/articles/security.md @@ -38,7 +38,7 @@ of another container. Of course, if the host system is setup accordingly, containers can interact with each other through their respective network interfaces — just like they can interact with external hosts. When you specify public ports for your containers or use -[*links*](../../use/working_with_links_names/#working-with-links-names) +[*links*](/use/working_with_links_names/#working-with-links-names) then IP traffic is allowed between containers. They can ping each other, send/receive UDP packets, and establish TCP connections, but that can be restricted if necessary. From a network architecture point of view, all diff --git a/docs/sources/examples/apt-cacher-ng.md b/docs/sources/examples/apt-cacher-ng.md index bfcf1ed232..0293ac5d0b 100644 --- a/docs/sources/examples/apt-cacher-ng.md +++ b/docs/sources/examples/apt-cacher-ng.md @@ -10,7 +10,7 @@ page_keywords: docker, example, package installation, networking, debian, ubuntu > more information please see [*Check your Docker > install*](../hello_world/#running-examples). > - **If you don't like sudo** then see [*Giving non-root -> access*](../../installation/binaries/#dockergroup). +> access*](/installation/binaries/#dockergroup). > - **If you're using OS X or docker via TCP** then you shouldn't use > sudo. diff --git a/docs/sources/examples/couchdb_data_volumes.md b/docs/sources/examples/couchdb_data_volumes.md index 6af8e2fd1e..10abe7af02 100644 --- a/docs/sources/examples/couchdb_data_volumes.md +++ b/docs/sources/examples/couchdb_data_volumes.md @@ -10,7 +10,7 @@ page_keywords: docker, example, package installation, networking, couchdb, data > more information please see [*Check your Docker > install*](../hello_world/#running-examples). > - **If you don't like sudo** then see [*Giving non-root -> access*](../../installation/binaries/#dockergroup) +> access*](/installation/binaries/#dockergroup) Here's an example of using data volumes to share the same data between two CouchDB containers. This could be used for hot upgrades, testing diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md index 9bcc619896..c7e821136c 100644 --- a/docs/sources/examples/hello_world.md +++ b/docs/sources/examples/hello_world.md @@ -15,7 +15,7 @@ like `/var/lib/docker/repositories: permission denied` you may have an incomplete Docker installation or insufficient privileges to access docker on your machine. -Please refer to [*Installation*](../../installation/) +Please refer to [*Installation*](/installation/) for installation instructions. ## Hello World @@ -26,7 +26,7 @@ for installation instructions. > more information please see [*Check your Docker > install*](#check-your-docker-installation). > - **If you don't like sudo** then see [*Giving non-root -> access*](../../installation/binaries/#dockergroup) +> access*](/installation/binaries/#dockergroup) This is the most basic example available for using Docker. @@ -71,7 +71,7 @@ See the example in action > more information please see [*Check your Docker > install*](#check-your-docker-installation). 
> - **If you don't like sudo** then see [*Giving non-root -> access*](../../installation/binaries/#dockergroup) +> access*](/installation/binaries/#dockergroup) And now for the most boring daemon ever written! diff --git a/docs/sources/examples/mongodb.md b/docs/sources/examples/mongodb.md index bf907891da..36a5a58ad8 100644 --- a/docs/sources/examples/mongodb.md +++ b/docs/sources/examples/mongodb.md @@ -10,7 +10,7 @@ page_keywords: docker, example, package installation, networking, mongodb > more information please see [*Check your Docker > install*](../hello_world/#running-examples). > - **If you don't like sudo** then see [*Giving non-root -> access*](../../installation/binaries/#dockergroup) +> access*](/installation/binaries/#dockergroup) The goal of this example is to show how you can build your own Docker images with MongoDB pre-installed. We will do that by constructing a diff --git a/docs/sources/examples/nodejs_web_app.md b/docs/sources/examples/nodejs_web_app.md index 0c04836b98..f7d63dadcf 100644 --- a/docs/sources/examples/nodejs_web_app.md +++ b/docs/sources/examples/nodejs_web_app.md @@ -10,7 +10,7 @@ page_keywords: docker, example, package installation, node, centos > more information please see [*Check your Docker > install*](../hello_world/#running-examples). > - **If you don't like sudo** then see [*Giving non-root -> access*](../../installation/binaries/#dockergroup) +> access*](/installation/binaries/#dockergroup) The goal of this example is to show you how you can build your own Docker images from a parent image using a `Dockerfile` diff --git a/docs/sources/examples/postgresql_service.md b/docs/sources/examples/postgresql_service.md index 93abc06e39..1a10cd4415 100644 --- a/docs/sources/examples/postgresql_service.md +++ b/docs/sources/examples/postgresql_service.md @@ -10,7 +10,7 @@ page_keywords: docker, example, package installation, postgresql > more information please see [*Check your Docker > install*](../hello_world/#running-examples). > - **If you don't like sudo** then see [*Giving non-root -> access*](../../installation/binaries/#dockergroup) +> access*](/installation/binaries/#dockergroup) ## Installing PostgreSQL on Docker @@ -87,7 +87,7 @@ And run the PostgreSQL server container (in the foreground): $ sudo docker run -rm -P -name pg_test eg_postgresql There are 2 ways to connect to the PostgreSQL server. We can use [*Link -Containers*](../../use/working_with_links_names/#working-with-links-names), +Containers*](/use/working_with_links_names/#working-with-links-names), or we can access it from our host (or the network). > **Note**: diff --git a/docs/sources/examples/python_web_app.md b/docs/sources/examples/python_web_app.md index fc454a390a..e761003a9e 100644 --- a/docs/sources/examples/python_web_app.md +++ b/docs/sources/examples/python_web_app.md @@ -10,7 +10,7 @@ page_keywords: docker, example, python, web app > more information please see [*Check your Docker > install*](../hello_world/#running-examples). 
> - **If you don't like sudo** then see [*Giving non-root -> access*](../../installation/binaries/#dockergroup) +> access*](/installation/binaries/#dockergroup) While using Dockerfiles is the preferred way to create maintainable and repeatable images, its useful to know how you can try things out and diff --git a/docs/sources/examples/running_redis_service.md b/docs/sources/examples/running_redis_service.md index 2598af2897..2bfa8a05bc 100644 --- a/docs/sources/examples/running_redis_service.md +++ b/docs/sources/examples/running_redis_service.md @@ -10,7 +10,7 @@ page_keywords: docker, example, package installation, networking, redis > more information please see [*Check your Docker > install*](../hello_world/#running-examples). > - **If you don't like sudo** then see [*Giving non-root -> access*](../../installation/binaries/#dockergroup) +> access*](/installation/binaries/#dockergroup) Very simple, no frills, Redis service attached to a web application using a link. diff --git a/docs/sources/examples/running_riak_service.md b/docs/sources/examples/running_riak_service.md index 577ae76aa3..61594f9cd8 100644 --- a/docs/sources/examples/running_riak_service.md +++ b/docs/sources/examples/running_riak_service.md @@ -10,7 +10,7 @@ page_keywords: docker, example, package installation, networking, riak > more information please see [*Check your Docker > install*](../hello_world/#running-examples). > - **If you don't like sudo** then see [*Giving non-root -> access*](../../installation/binaries/#dockergroup) +> access*](/installation/binaries/#dockergroup) The goal of this example is to show you how to build a Docker image with Riak pre-installed. diff --git a/docs/sources/examples/running_ssh_service.md b/docs/sources/examples/running_ssh_service.md index 20d5c12326..864d10c726 100644 --- a/docs/sources/examples/running_ssh_service.md +++ b/docs/sources/examples/running_ssh_service.md @@ -9,7 +9,7 @@ page_keywords: docker, example, package installation, networking > more information please see [*Check your Docker > install*](../hello_world/#running-examples). > - **If you don't like sudo** then see [*Giving non-root -> access*](../../installation/binaries/#dockergroup) +> access*](/installation/binaries/#dockergroup) The following Dockerfile sets up an sshd service in a container that you can use to connect to and inspect other container's volumes, or to get diff --git a/docs/sources/examples/using_supervisord.md b/docs/sources/examples/using_supervisord.md index 6faa456080..8e85ae05d2 100644 --- a/docs/sources/examples/using_supervisord.md +++ b/docs/sources/examples/using_supervisord.md @@ -10,7 +10,7 @@ page_keywords: docker, supervisor, process management > more information please see [*Check your Docker > install*](../hello_world/#running-examples). > - **If you don't like sudo** then see [*Giving non-root -> access*](../../installation/binaries/#dockergroup) +> access*](/installation/binaries/#dockergroup) Traditionally a Docker container runs a single process when it is launched, for example an Apache daemon or a SSH server daemon. Often diff --git a/docs/sources/installation/amazon.md b/docs/sources/installation/amazon.md index 06ee65a772..61a12d6b43 100644 --- a/docs/sources/installation/amazon.md +++ b/docs/sources/installation/amazon.md @@ -59,8 +59,8 @@ add the *ubuntu* user to it so that you don't have to use `sudo` for every Docker command. 
Once you`ve got Docker installed, you're ready to try it out – head on -over to the [*First steps with Docker*](../../use/basics/) or -[*Examples*](../../examples/) section. +over to the [*First steps with Docker*](/use/basics/) or +[*Examples*](/examples/) section. ## Amazon QuickStart (Release Candidate - March 2014) @@ -100,4 +100,4 @@ QuickStart*](#amazon-quickstart) to pick an image (or use one of your own) and skip the step with the *User Data*. Then continue with the [*Ubuntu*](../ubuntulinux/#ubuntu-linux) instructions. -Continue with the [*Hello World*](../../examples/hello_world/#hello-world) example. +Continue with the [*Hello World*](/examples/hello_world/#hello-world) example. diff --git a/docs/sources/installation/binaries.md b/docs/sources/installation/binaries.md index f33508c9d7..b02c28828b 100644 --- a/docs/sources/installation/binaries.md +++ b/docs/sources/installation/binaries.md @@ -80,7 +80,7 @@ all the client commands. > **Warning**: > The *docker* group (or the group specified with `-G`) is root-equivalent; > see [*Docker Daemon Attack Surface*]( -> ../../articles/security/#dockersecurity-daemon) details. +> /articles/security/#dockersecurity-daemon) details. ## Upgrades @@ -99,4 +99,4 @@ Then follow the regular installation steps. # run a container and open an interactive shell in the container sudo ./docker run -i -t ubuntu /bin/bash -Continue with the [*Hello World*](../../examples/hello_world/#hello-world) example. +Continue with the [*Hello World*](/examples/hello_world/#hello-world) example. diff --git a/docs/sources/installation/fedora.md b/docs/sources/installation/fedora.md index bd82674b01..70d8c1462e 100644 --- a/docs/sources/installation/fedora.md +++ b/docs/sources/installation/fedora.md @@ -61,4 +61,4 @@ Now let's verify that Docker is working. sudo docker run -i -t fedora /bin/bash **Done!**, now continue with the [*Hello -World*](../../examples/hello_world/#hello-world) example. +World*](/examples/hello_world/#hello-world) example. diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md index 71fd9f5fed..1cef06b55b 100644 --- a/docs/sources/installation/mac.md +++ b/docs/sources/installation/mac.md @@ -143,7 +143,7 @@ If you feel the need to connect to the VM, you can simply run: # Pwd: tcuser You can now continue with the [*Hello -World*](../../examples/hello_world/#hello-world) example. +World*](/examples/hello_world/#hello-world) example. ## Learn More diff --git a/docs/sources/installation/openSUSE.md b/docs/sources/installation/openSUSE.md index b4fa9183a5..2d7804d291 100644 --- a/docs/sources/installation/openSUSE.md +++ b/docs/sources/installation/openSUSE.md @@ -61,4 +61,4 @@ Docker daemon. **Done!** Now continue with the [*Hello World*]( -../../examples/hello_world/#hello-world) example. +/examples/hello_world/#hello-world) example. diff --git a/docs/sources/installation/rhel.md b/docs/sources/installation/rhel.md index 715cca74a2..874e92adc8 100644 --- a/docs/sources/installation/rhel.md +++ b/docs/sources/installation/rhel.md @@ -68,7 +68,7 @@ Now let's verify that Docker is working. sudo docker run -i -t fedora /bin/bash **Done!** -Now continue with the [*Hello World*](../../examples/hello_world/#hello-world) example. +Now continue with the [*Hello World*](/examples/hello_world/#hello-world) example. ## Issues? 
diff --git a/docs/sources/installation/softlayer.md b/docs/sources/installation/softlayer.md index 6468829594..11a192c61a 100644 --- a/docs/sources/installation/softlayer.md +++ b/docs/sources/installation/softlayer.md @@ -33,4 +33,4 @@ page_keywords: IBM SoftLayer, virtualization, cloud, docker, documentation, inst instructions. Continue with the [*Hello World*]( -../../examples/hello_world/#hello-world) example. +/examples/hello_world/#hello-world) example. diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md index c4152ec1c4..40dc541b6a 100644 --- a/docs/sources/installation/ubuntulinux.md +++ b/docs/sources/installation/ubuntulinux.md @@ -96,7 +96,7 @@ Now verify that the installation has worked by downloading the Type `exit` to exit **Done!**, now continue with the [*Hello -World*](../../examples/hello_world/#hello-world) example. +World*](/examples/hello_world/#hello-world) example. ## Ubuntu Raring 13.04 and Saucy 13.10 (64 bit) @@ -144,7 +144,7 @@ Now verify that the installation has worked by downloading the Type `exit` to exit **Done!**, now continue with the [*Hello -World*](../../examples/hello_world/#hello-world) example. +World*](/examples/hello_world/#hello-world) example. ### Giving non-root access @@ -168,7 +168,7 @@ than `docker` should own the Unix socket with the > **Warning**: > The *docker* group (or the group specified with `-G`) is > root-equivalent; see [*Docker Daemon Attack Surface*]( -> ../../articles/security/#dockersecurity-daemon) details. +> /articles/security/#dockersecurity-daemon) details. **Example:** diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.6.md b/docs/sources/reference/api/archive/docker_remote_api_v1.6.md index 47c2b82e27..2ec7336a75 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.6.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.6.md @@ -9,7 +9,7 @@ page_keywords: API, Docker, rcli, REST, documentation - The Remote API has replaced rcli - The daemon listens on `unix:///var/run/docker.sock` but you can [*Bind Docker to another host/port or a Unix socket*]( - ../../../use/basics/#bind-docker). + /use/basics/#bind-docker). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` and `stderr` @@ -540,7 +540,7 @@ Attach to the container `id` When using the TTY setting is enabled in [`POST /containers/create` - ](../../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), + ](/api/docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), the stream is the raw data from the process PTY and client's stdin. When the TTY is disabled, then the stream is multiplexed to separate stdout and stderr. diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.7.md b/docs/sources/reference/api/archive/docker_remote_api_v1.7.md index ccc973925a..cf748a7f9b 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.7.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.7.md @@ -9,7 +9,7 @@ page_keywords: API, Docker, rcli, REST, documentation - The Remote API has replaced rcli - The daemon listens on `unix:///var/run/docker.sock` but you can [*Bind Docker to another host/port or a Unix socket*]( - ../../../use/basics/#bind-docker). + /use/basics/#bind-docker). 
- The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` and `stderr` @@ -489,7 +489,7 @@ Attach to the container `id` When using the TTY setting is enabled in [`POST /containers/create` - ](../../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), + ](/api/docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), the stream is the raw data from the process PTY and client's stdin. When the TTY is disabled, then the stream is multiplexed to separate stdout and stderr. @@ -980,7 +980,7 @@ Build an image from Dockerfile via stdin The archive must include a file called `Dockerfile` at its root. It may include any number of other files, which will be accessible in the build context (See the [*ADD build - command*](../../../builder/#dockerbuilder)). + command*](/builder/#dockerbuilder)). Query Parameters: diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.8.md b/docs/sources/reference/api/archive/docker_remote_api_v1.8.md index 4bc4d01638..8520e9f1e5 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.8.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.8.md @@ -9,7 +9,7 @@ page_keywords: API, Docker, rcli, REST, documentation - The Remote API has replaced rcli - The daemon listens on `unix:///var/run/docker.sock` but you can [*Bind Docker to another host/port or a Unix socket*]( - ../../../use/basics/#bind-docker). + /use/basics/#bind-docker). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` and `stderr` @@ -531,7 +531,7 @@ Attach to the container `id` When using the TTY setting is enabled in [`POST /containers/create` - ](../../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), + ](/api/docker_remote_api_v1.9/#post--containers-create "POST /containers/create"), the stream is the raw data from the process PTY and client's stdin. When the TTY is disabled, then the stream is multiplexed to separate stdout and stderr. @@ -1024,7 +1024,7 @@ Build an image from Dockerfile via stdin The archive must include a file called `Dockerfile` at its root. It may include any number of other files, which will be accessible in the build context (See the [*ADD build - command*](../../../builder/#dockerbuilder)). + command*](/reference/builder/#dockerbuilder)). Query Parameters: diff --git a/docs/sources/reference/api/docker_io_oauth_api.md b/docs/sources/reference/api/docker_io_oauth_api.md index 6cc4a6d546..dd2f6d75ec 100644 --- a/docs/sources/reference/api/docker_io_oauth_api.md +++ b/docs/sources/reference/api/docker_io_oauth_api.md @@ -96,7 +96,7 @@ an Authorization Code. prompt which asks the user to authorize your application with a description of the requested scopes. - ![](../../../static_files/io_oauth_authorization_page.png) + ![](/reference/api/_static/io_oauth_authorization_page.png) Once the user allows or denies your Authorization Request the user will be redirected back to your application. 
Included in that diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md index 474857bac3..c07f96f384 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.md +++ b/docs/sources/reference/api/docker_remote_api_v1.10.md @@ -9,7 +9,7 @@ page_keywords: API, Docker, rcli, REST, documentation - The Remote API has replaced rcli - The daemon listens on `unix:///var/run/docker.sock` but you can [*Bind Docker to another host/port or a Unix socket*]( - ../../../use/basics/#bind-docker). + /use/basics/#bind-docker). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` and `stderr` @@ -1009,7 +1009,7 @@ Build an image from Dockerfile via stdin The archive must include a file called `Dockerfile` at its root. It may include any number of other files, which will be accessible in the build context (See the [*ADD build - command*](../../builder/#dockerbuilder)). + command*](/reference/builder/#dockerbuilder)). Query Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index af47fdefbf..5e3fdcb0a8 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -9,7 +9,7 @@ page_keywords: API, Docker, rcli, REST, documentation - The Remote API has replaced rcli - The daemon listens on `unix:///var/run/docker.sock` but you can [*Bind Docker to another host/port or a Unix socket*]( - ../../../use/basics/#bind-docker). + /use/basics/#bind-docker). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` and `stderr` @@ -1012,7 +1012,7 @@ Build an image from Dockerfile via stdin The archive must include a file called `Dockerfile` at its root. It may include any number of other files, which will be accessible in the build context (See the [*ADD build - command*](../../builder/#dockerbuilder)). + command*](/reference/builder/#dockerbuilder)). Query Parameters: diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md index be1c76aee4..74e85a7ee6 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.md +++ b/docs/sources/reference/api/docker_remote_api_v1.9.md @@ -9,7 +9,7 @@ page_keywords: API, Docker, rcli, REST, documentation - The Remote API has replaced rcli - The daemon listens on `unix:///var/run/docker.sock` but you can [*Bind Docker to another host/port or a Unix socket*]( - ../../../use/basics/#bind-docker). + /use/basics/#bind-docker). - The API tends to be REST, but for some complex commands, like `attach` or `pull`, the HTTP connection is hijacked to transport `stdout, stdin` and `stderr` @@ -1022,7 +1022,7 @@ Build an image from Dockerfile using a POST body. The archive must include a file called `Dockerfile` at its root. It may include any number of other files, which will be accessible in the build context (See the [*ADD build - command*](../../builder/#dockerbuilder)). + command*](/reference/builder/#dockerbuilder)). 
Query Parameters: diff --git a/docs/sources/reference/api/registry_index_spec.md b/docs/sources/reference/api/registry_index_spec.md index ab775b2237..fb5617d101 100644 --- a/docs/sources/reference/api/registry_index_spec.md +++ b/docs/sources/reference/api/registry_index_spec.md @@ -95,7 +95,7 @@ supports: ### Pull -![](../../../static_files/docker_pull_chart.png) +![](/static_files/docker_pull_chart.png) 1. Contact the Index to know where I should download “samalba/busybox” 2. Index replies: a. `samalba/busybox` is on Registry A b. here are the @@ -187,7 +187,7 @@ and for an active account. ### Push -![](../../../static_files/docker_push_chart.png) +![](/static_files/docker_push_chart.png) 1. Contact the index to allocate the repository name “samalba/busybox” (authentication required with user credentials) diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index c976c118d7..3e278425c2 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -57,7 +57,7 @@ accelerating `docker build` significantly (indicated by `Using cache`): When you're done with your build, you're ready to look into [*Pushing a repository to its registry*]( -../../use/workingwithrepository/#image-push). +/use/workingwithrepository/#image-push). ## Format @@ -71,7 +71,7 @@ be UPPERCASE in order to distinguish them from arguments more easily. Docker evaluates the instructions in a Dockerfile in order. **The first instruction must be \`FROM\`** in order to specify the [*Base -Image*](../../terms/image/#base-image-def) from which you are building. +Image*](/terms/image/#base-image-def) from which you are building. Docker will treat lines that *begin* with `#` as a comment. A `#` marker anywhere else in the line will @@ -91,11 +91,11 @@ Or FROM : -The `FROM` instruction sets the [*Base Image*](../../terms/image/#base-image-def) +The `FROM` instruction sets the [*Base Image*](/terms/image/#base-image-def) for subsequent instructions. As such, a valid Dockerfile must have `FROM` as its first instruction. The image can be any valid image – it is especially easy to start by **pulling an image** from the [*Public Repositories*]( -../../use/workingwithrepository/#using-public-repositories). +/use/workingwithrepository/#using-public-repositories). `FROM` must be the first non-comment instruction in the Dockerfile. @@ -191,9 +191,9 @@ default specified in CMD. The `EXPOSE` instructions informs Docker that the container will listen on the specified network ports at runtime. Docker uses this information to interconnect containers using links (see -[*links*](../../use/working_with_links_names/#working-with-links-names)), +[*links*](/use/working_with_links_names/#working-with-links-names)), and to setup port redirection on the host system (see [*Redirect Ports*]( -../../use/port_redirection/#port-redirection)). +/use/port_redirection/#port-redirection)). ## ENV @@ -327,7 +327,7 @@ The `VOLUME` instruction will create a mount point with the specified name and mark it as holding externally mounted volumes from native host or other containers. For more information/examples and mounting instructions via docker client, refer to [*Share Directories via Volumes*]( -../../use/working_with_volumes/#volume-def) documentation. +/use/working_with_volumes/#volume-def) documentation. 
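As a small sketch of the volume behaviour referenced above (the host path `/tmp/data` is only an example), the docker client can also mount a host directory into a container at run time:

    # bind mount a host directory into the container at /data
    sudo docker run -v /tmp/data:/data busybox ls /data
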
## USER diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index d59bd37674..6388cb6192 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -197,7 +197,7 @@ and a "context". The files at `PATH` or `URL` are called the "context" of the build. The build process may refer to any of the files in the context, for example when using an -[*ADD*](../../builder/#dockerfile-add) instruction. When a single Dockerfile is +[*ADD*](/reference/builder/#dockerfile-add) instruction. When a single Dockerfile is given as `URL`, then no context is set. When a Git repository is set as `URL`, then the @@ -209,7 +209,7 @@ vpn's etc can be used to access private repositories See also: -[*Dockerfile Reference*](../../builder/#dockerbuilder). +[*Dockerfile Reference*](/reference/builder/#dockerbuilder). ### Examples: @@ -248,7 +248,7 @@ machine and that no parsing of the Dockerfile happens at the client side (where you're running `docker build`). That means that *all* the files at `PATH` get sent, not just the ones listed to -[*ADD*](../../builder/#dockerfile-add) in the Dockerfile. +[*ADD*](/reference/builder/#dockerfile-add) in the Dockerfile. The transfer of context from the local machine to the Docker daemon is what the `docker` client means when you see the @@ -843,10 +843,10 @@ of all containers. The `docker run` command can be used in combination with `docker commit` to [*change the command that a container runs*](#commit-an-existing-container). -See [*Redirect Ports*](../../../use/port_redirection/#port-redirection) +See [*Redirect Ports*](/use/port_redirection/#port-redirection) for more detailed information about the `--expose`, `-p`, `-P` and `--link` parameters, and [*Link Containers*]( -../../../use/working_with_links_names/#working-with-links-names) for specific +/use/working_with_links_names/#working-with-links-names) for specific examples using `--link`. ### Known Issues (run –volumes-from) @@ -914,14 +914,14 @@ manipulate the host's docker daemon. $ sudo docker run -p 127.0.0.1:80:8080 ubuntu bash This binds port `8080` of the container to port `80` on `127.0.0.1` of the host -machine. [*Redirect Ports*](../../../use/port_redirection/#port-redirection) +machine. [*Redirect Ports*](/use/port_redirection/#port-redirection) explains in detail how to manipulate ports in Docker. $ sudo docker run --expose 80 ubuntu bash This exposes port `80` of the container for use within a link without publishing the port to the host system's interfaces. [*Redirect Ports*]( -../../../use/port_redirection/#port-redirection) explains in detail how to +/use/port_redirection/#port-redirection) explains in detail how to manipulate ports in Docker. $ sudo docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash @@ -1074,7 +1074,7 @@ Search the docker index for images -t, --trusted=false: Only show trusted builds See [*Find Public Images on the Central Index*]( -../../../use/workingwithrepository/#searching-central-index) for +/use/workingwithrepository/#searching-central-index) for more details on finding shared images from the commandline. ## start @@ -1107,7 +1107,7 @@ Tag an image into a repository You can group your images together using names and tags, and then upload them to [*Share Images via Repositories*]( -../../../use/workingwithrepository/#working-with-the-repository). +/use/workingwithrepository/#working-with-the-repository). ## top @@ -1124,4 +1124,4 @@ version. 
Usage: docker wait [OPTIONS] NAME -Block until a container stops, then print its exit code. \ No newline at end of file +Block until a container stops, then print its exit code. diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index f6f132a09d..9de08ec1a6 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -7,25 +7,25 @@ page_keywords: docker, run, configure, runtime **Docker runs processes in isolated containers**. When an operator executes `docker run`, she starts a process with its own file system, its own networking, and its own isolated process tree. -The [*Image*](../../terms/image/#image-def) which starts the process may +The [*Image*](/terms/image/#image-def) which starts the process may define defaults related to the binary to run, the networking to expose, and more, but `docker run` gives final control to the operator who starts the container from the image. That's the main -reason [*run*](../../commandline/cli/#cli-run) has more options than any +reason [*run*](/commandline/cli/#cli-run) has more options than any other `docker` command. -Every one of the [*Examples*](../../examples/#example-list) shows +Every one of the [*Examples*](/examples/#example-list) shows running containers, and so here we try to give more in-depth guidance. ## General Form -As you`ve seen in the [*Examples*](../../examples/#example-list), the +As you`ve seen in the [*Examples*](/examples/#example-list), the basic run command takes this form: docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] To learn how to interpret the types of `[OPTIONS]`, -see [*Option types*](../../commandline/cli/#cli-options). +see [*Option types*](/commandline/cli/#cli-options). The list of `[OPTIONS]` breaks down into two groups: @@ -121,7 +121,7 @@ assign a name to the container with `--name` then the daemon will also generate a random string name too. The name can become a handy way to add meaning to a container since you can use this name when defining -[*links*](../../use/working_with_links_names/#working-with-links-names) +[*links*](/use/working_with_links_names/#working-with-links-names) (or any other place you need to identify a container). This works for both background and foreground Docker containers. @@ -372,7 +372,7 @@ And we can use that information to connect from another container as a client: --volumes-from="": Mount all volumes from the given container(s) The volumes commands are complex enough to have their own documentation in -section [*Share Directories via Volumes*](../../use/working_with_volumes/#volume-def). +section [*Share Directories via Volumes*](/use/working_with_volumes/#volume-def). A developer can define one or more `VOLUME's associated with an image, but only the operator can give access from one container to another (or from a container to a volume mounted on the host). 
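A minimal operator-side sketch of that split (the container name `data` and the path `/data` are arbitrary): the first run creates a volume, the second reuses it via `--volumes-from`:

    # create a named container that owns a /data volume
    sudo docker run -v /data --name data busybox true
    # give a second container access to the same volume
    sudo docker run --volumes-from data busybox ls /data
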
diff --git a/docs/sources/terms/container.md b/docs/sources/terms/container.md index d7f139a3ca..5bedc3160e 100644 --- a/docs/sources/terms/container.md +++ b/docs/sources/terms/container.md @@ -6,7 +6,7 @@ page_keywords: containers, lxc, concepts, explanation, image, container ## Introduction -![](../../static_files/docker-filesystems-busyboxrw.png) +![](/terms/images/docker-filesystems-busyboxrw.png) Once you start a process in Docker from an [*Image*](image.md), Docker fetches the image and its [*Parent Image*](image.md), and repeats the process until it diff --git a/docs/sources/terms/filesystem.md b/docs/sources/terms/filesystem.md index 07f75e361e..5587e3c831 100644 --- a/docs/sources/terms/filesystem.md +++ b/docs/sources/terms/filesystem.md @@ -6,7 +6,7 @@ page_keywords: containers, files, linux ## Introduction -![](../../static_files/docker-filesystems-generic.png) +![](/terms/images/docker-filesystems-generic.png) In order for a Linux system to run, it typically needs two [file systems](http://en.wikipedia.org/wiki/Filesystem): @@ -32,4 +32,4 @@ usually what make your software packages dependent on one distribution versus another. Docker can help solve this problem by running multiple distributions at the same time. -![](../../static_files/docker-filesystems-multiroot.png) +![](/terms/images/docker-filesystems-multiroot.png) diff --git a/docs/sources/terms/image.md b/docs/sources/terms/image.md index 031dd2e978..b10debcc6a 100644 --- a/docs/sources/terms/image.md +++ b/docs/sources/terms/image.md @@ -6,7 +6,7 @@ page_keywords: containers, lxc, concepts, explanation, image, container ## Introduction -![](../../static_files/docker-filesystems-debian.png) +![](/terms/images/docker-filesystems-debian.png) In Docker terminology, a read-only [*Layer*](../layer/#layer-def) is called an **image**. An image never changes. @@ -17,11 +17,11 @@ changes go to the top-most writeable layer, and underneath, the original file in the read-only image is unchanged. Since images don't change, images do not have state. -![](../../static_files/docker-filesystems-debianrw.png) +![](/terms/images/docker-filesystems-debianrw.png) ## Parent Image -![](../../static_files/docker-filesystems-multilayer.png) +![](/terms/images/docker-filesystems-multilayer.png) Each image may depend on one more image which forms the layer beneath it. We sometimes say that the lower image is the **parent** of the upper diff --git a/docs/sources/terms/layer.md b/docs/sources/terms/layer.md index 39c71fa4b6..b4b2ea4b7a 100644 --- a/docs/sources/terms/layer.md +++ b/docs/sources/terms/layer.md @@ -20,7 +20,7 @@ file system *over* the read-only file system. In fact there may be multiple read-only file systems stacked on top of each other. We think of each one of these file systems as a **layer**. -![](../../static_files/docker-filesystems-multilayer.png) +![](/terms/images/docker-filesystems-multilayer.png) At first, the top read-write layer has nothing in it, but any time a process creates a file, this happens in the top layer. And if something diff --git a/docs/sources/use/basics.md b/docs/sources/use/basics.md index 2d1bf34f96..bbe967cc7c 100644 --- a/docs/sources/use/basics.md +++ b/docs/sources/use/basics.md @@ -17,7 +17,7 @@ like `/var/lib/docker/repositories: permission denied` you may have an incomplete docker installation or insufficient privileges to access Docker on your machine. 
-Please refer to [*Installation*](../../installation/#installation-list) +Please refer to [*Installation*](/installation/#installation-list) for installation instructions. ## Download a pre-built image @@ -172,4 +172,4 @@ You now have a image state from which you can create new instances. Read more about [*Share Images via Repositories*]( ../workingwithrepository/#working-with-the-repository) or -continue to the complete [*Command Line*](../../reference/commandline/cli/#cli) +continue to the complete [*Command Line*](/reference/commandline/cli/#cli) diff --git a/docs/sources/use/chef.md b/docs/sources/use/chef.md index 476b2919d0..5145107a38 100644 --- a/docs/sources/use/chef.md +++ b/docs/sources/use/chef.md @@ -7,7 +7,7 @@ page_keywords: chef, installation, usage, docker, documentation > **Note**: > Please note this is a community contributed installation path. The only > `official` installation is using the -> [*Ubuntu*](../../installation/ubuntulinux/#ubuntu-linux) installation +> [*Ubuntu*](/installation/ubuntulinux/#ubuntu-linux) installation > path. This version may sometimes be out of date. ## Requirements diff --git a/docs/sources/use/puppet.md b/docs/sources/use/puppet.md index 026b1defb1..c1ac95f4ab 100644 --- a/docs/sources/use/puppet.md +++ b/docs/sources/use/puppet.md @@ -6,7 +6,7 @@ page_keywords: puppet, installation, usage, docker, documentation > *Note:* Please note this is a community contributed installation path. The > only `official` installation is using the -> [*Ubuntu*](../../installation/ubuntulinux/#ubuntu-linux) installation +> [*Ubuntu*](/installation/ubuntulinux/#ubuntu-linux) installation > path. This version may sometimes be out of date. ## Requirements diff --git a/docs/sources/use/working_with_volumes.md b/docs/sources/use/working_with_volumes.md index 4712a9fbff..5817309e62 100644 --- a/docs/sources/use/working_with_volumes.md +++ b/docs/sources/use/working_with_volumes.md @@ -8,7 +8,7 @@ page_keywords: Examples, Usage, volume, docker, documentation, examples A *data volume* is a specially-designated directory within one or more containers that bypasses the [*Union File -System*](../../terms/layer/#ufs-def) to provide several useful features +System*](/terms/layer/#ufs-def) to provide several useful features for persistent or shared data: - **Data volumes can be shared and reused between containers:** @@ -20,7 +20,7 @@ for persistent or shared data: very large files. - **Changes to a data volume will not be included at the next commit:** Because they are not recorded as regular filesystem changes in the - top layer of the [*Union File System*](../../terms/layer/#ufs-def) + top layer of the [*Union File System*](/terms/layer/#ufs-def) - **Volumes persist until no containers use them:** As they are a reference counted resource. The container does not need to be running to share its volumes, but running it can help protect it diff --git a/docs/sources/use/workingwithrepository.md b/docs/sources/use/workingwithrepository.md index 38eda476ed..2ffca34ce5 100644 --- a/docs/sources/use/workingwithrepository.md +++ b/docs/sources/use/workingwithrepository.md @@ -7,7 +7,7 @@ page_keywords: repo, repositories, usage, pull image, push image, image, documen ## Introduction A *repository* is a shareable collection of tagged -[*images*](../../terms/image/#image-def) that together create the file +[*images*](/terms/image/#image-def) that together create the file systems for containers. The repository's name is a label that indicates the provenance of the repository, i.e. 
who created it and where the original copy is located. @@ -19,7 +19,7 @@ the home of "top-level" repositories and the Central Index. This registry may also include public "user" repositories. Docker is not only a tool for creating and managing your own -[*containers*](../../terms/container/#container-def) – **Docker is also +[*containers*](/terms/container/#container-def) – **Docker is also a tool for sharing**. The Docker project provides a Central Registry to host public repositories, namespaced by user, and a Central Index which provides user authentication and search over all the public @@ -89,7 +89,7 @@ Once you have found the image name, you can download it: 539c0211cd76: Download complete What can you do with that image? Check out the -[*Examples*](../../examples/#example-list) and, when you're ready with +[*Examples*](/examples/#example-list) and, when you're ready with your own image, come back here to learn how to share it. ## Contributing to the Central Registry From da8f6ffdeb54a2d99a70deb4ffc58fdff76d5880 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Wed, 23 Apr 2014 11:03:03 +0000 Subject: [PATCH 026/219] initial version of installation on ubuntu 14.04 LTS Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/installation/ubuntulinux.md | 29 ++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md index 40dc541b6a..04173cf917 100644 --- a/docs/sources/installation/ubuntulinux.md +++ b/docs/sources/installation/ubuntulinux.md @@ -4,10 +4,6 @@ page_keywords: Docker, Docker documentation, requirements, virtualbox, vagrant, # Ubuntu -> **Warning**: -> These instructions have changed for 0.6. If you are upgrading from an -> earlier version, you will need to follow them again. - > **Note**: > Docker is still under heavy development! We don't recommend using it in > production yet, but we're getting closer with each release. Please see @@ -16,6 +12,7 @@ page_keywords: Docker, Docker documentation, requirements, virtualbox, vagrant, Docker is supported on the following versions of Ubuntu: + - [*Ubuntu Trusty 14.04 (LTS) (64-bit)*](#ubuntu-trusty-1404-lts-64-bit) - [*Ubuntu Precise 12.04 (LTS) (64-bit)*](#ubuntu-precise-1204-lts-64-bit) - [*Ubuntu Raring 13.04 and Saucy 13.10 (64 bit)*](#ubuntu-raring-1304-and-saucy-1310-64-bit) @@ -23,6 +20,30 @@ Docker is supported on the following versions of Ubuntu: Please read [*Docker and UFW*](#docker-and-ufw), if you plan to use [UFW (Uncomplicated Firewall)](https://help.ubuntu.com/community/UFW) +## Ubuntu Trusty 14.04 (LTS) (64-bit) + +Ubuntu Trusty comes with a 3.13.0 Linux kernel, and a `docker.io` package which +installs all its prerequisites from Ubuntu's repository. + +> **Note**: +> Ubuntu (and Debian) contain a much older KDE3/GNOME2 package called ``docker``, so the +> package and the executable are called ``docker.io``. + +### Installation + +To install the latest Ubuntu package (may not be the latest Docker release): + + sudo apt-get update + sudo apt-get install docker.io + sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker + +To verify that everything has worked as expected: + + sudo docker run -i -t ubuntu /bin/bash + +Which should download the `ubuntu` image, and then start `bash` in a container. + + ## Ubuntu Precise 12.04 (LTS) (64-bit) This installation path should work at all times. 
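Relatedly, because the Trusty section above notes that Ubuntu also ships an unrelated package named `docker`, a quick sanity check before installing (a sketch, assuming stock Trusty apt sources) is:

    # confirm which package provides Docker on Trusty before installing it
    apt-cache policy docker.io
    sudo apt-get update && sudo apt-get install docker.io
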
From 5e5230cb190b6d78cf27706da447d179b79b29b6 Mon Sep 17 00:00:00 2001 From: "O.S.Tezer" Date: Thu, 24 Apr 2014 16:29:12 +0300 Subject: [PATCH 027/219] Fix stray url from beta-docs warning (remove aws bucket URI) Docker-DCO-1.1-Signed-off-by: O.S. Tezer (github: ostezer) --- docs/theme/mkdocs/beta_warning.html | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/theme/mkdocs/beta_warning.html b/docs/theme/mkdocs/beta_warning.html index b7ffd28a9a..c46f9fd0be 100644 --- a/docs/theme/mkdocs/beta_warning.html +++ b/docs/theme/mkdocs/beta_warning.html @@ -27,6 +27,5 @@ {% if docker_version != docker_version|replace("-dev", "bingo") %}{{ docker_branch }} development branch{% else %}beta{% endif %} documentation for Docker version {{ docker_version }}. Please go to http://docs.docker.io for the current Docker release documentation. - {{ aws_bucket }} {% endif %} From 264dc8a46bb7f7679439d0a2c41c79a09642acab Mon Sep 17 00:00:00 2001 From: Rohit Jnagal Date: Thu, 24 Apr 2014 14:43:02 +0000 Subject: [PATCH 028/219] Add support for cpu hardcapping to cgroups. Docker-DCO-1.1-Signed-off-by: Rohit Jnagal (github: rjnagal) --- pkg/cgroups/cgroups.go | 2 ++ pkg/cgroups/fs/cpu.go | 10 ++++++++++ 2 files changed, 12 insertions(+) diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go index 3aac971340..9a498609b5 100644 --- a/pkg/cgroups/cgroups.go +++ b/pkg/cgroups/cgroups.go @@ -16,6 +16,8 @@ type Cgroup struct { Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes) MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers) + CpuQuota int64 `json:"cpu_quota,omitempty"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. + CpuPeriod int64 `json:"cpu_period,omitempty"` // CPU period to be used for hardcapping (in usecs). 0 to use system default. 
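	// Illustrative pairing (not a default): CpuQuota=50000 with CpuPeriod=100000
	// allows the cgroup 50ms of CPU time every 100ms, i.e. roughly half of one CPU;
	// a quota larger than the period allows more than one CPU's worth of time.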
CpusetCpus string `json:"cpuset_cpus,omitempty"` // CPU to use UnitProperties [][2]string `json:"unit_properties,omitempty"` // systemd unit properties diff --git a/pkg/cgroups/fs/cpu.go b/pkg/cgroups/fs/cpu.go index 8eb0c4ff46..2664811851 100644 --- a/pkg/cgroups/fs/cpu.go +++ b/pkg/cgroups/fs/cpu.go @@ -19,6 +19,16 @@ func (s *cpuGroup) Set(d *data) error { return err } } + if d.c.CpuPeriod != 0 { + if err := writeFile(dir, "cpu.cfs_period_us", strconv.FormatInt(d.c.CpuPeriod, 10)); err != nil { + return err + } + } + if d.c.CpuQuota != 0 { + if err := writeFile(dir, "cpu.cfs_quota_us", strconv.FormatInt(d.c.CpuQuota, 10)); err != nil { + return err + } + } return nil } From d26ea78e42ebf18219b88e01c6252f30aa764aa2 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 10 Apr 2014 21:41:31 +0000 Subject: [PATCH 029/219] Move apparmor into security sub dir Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/init.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 0e85c0e4be..ce51db37f9 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -13,6 +13,7 @@ import ( "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/capabilities" "github.com/dotcloud/docker/pkg/libcontainer/network" + "github.com/dotcloud/docker/pkg/libcontainer/security/apparmor" "github.com/dotcloud/docker/pkg/libcontainer/utils" "github.com/dotcloud/docker/pkg/system" "github.com/dotcloud/docker/pkg/user" From 60a90970bc4add3547064004f08c19ab5027141b Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 10 Apr 2014 23:03:52 +0000 Subject: [PATCH 030/219] Add restrictions to proc in libcontainer Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/execdriver/native/create.go | 3 + daemon/execdriver/native/driver.go | 7 ++ pkg/libcontainer/nsinit/init.go | 2 +- pkg/libcontainer/nsinit/mount.go | 67 +++++++++++-------- .../security/restrict/restrict.go | 46 +++++++++++++ 5 files changed, 96 insertions(+), 29 deletions(-) create mode 100644 pkg/libcontainer/security/restrict/restrict.go diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index ef17ce7042..1edbd17ad3 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -25,6 +25,7 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container container.Cgroups.Name = c.ID // check to see if we are running in ramdisk to disable pivot root container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" + container.Context["restriction_path"] = d.restrictionPath if err := d.createNetwork(container, c); err != nil { return nil, err @@ -81,6 +82,8 @@ func (d *driver) setPrivileged(container *libcontainer.Container) error { c.Enabled = true } container.Cgroups.DeviceAccess = true + delete(container.Context, "restriction_path") + if apparmor.IsEnabled() { container.Context["apparmor_profile"] = "unconfined" } diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go index ab82cdcc65..31a2eb0dae 100644 --- a/daemon/execdriver/native/driver.go +++ b/daemon/execdriver/native/driver.go @@ -62,6 +62,7 @@ type driver struct { root string initPath string activeContainers map[string]*exec.Cmd + restrictionPath string } func NewDriver(root, initPath string) (*driver, error) { @@ -72,8 +73,14 @@ func NewDriver(root, initPath string) (*driver, error) { if err := 
apparmor.InstallDefaultProfile(filepath.Join(root, "../..", BackupApparmorProfilePath)); err != nil { return nil, err } + restrictionPath := filepath.Join(root, "empty") + if err := os.MkdirAll(restrictionPath, 0700); err != nil { + return nil, err + } + return &driver{ root: root, + restrictionPath: restrictionPath, initPath: initPath, activeContainers: make(map[string]*exec.Cmd), }, nil diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index ce51db37f9..fb3a895a78 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -61,7 +61,7 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol label.Init() ns.logger.Println("setup mount namespace") - if err := setupNewMountNamespace(rootfs, container.Mounts, console, container.ReadonlyFs, container.NoPivotRoot, container.Context["mount_label"]); err != nil { + if err := setupNewMountNamespace(rootfs, console, container); err != nil { return fmt.Errorf("setup mount namespace %s", err) } if err := system.Sethostname(container.Hostname); err != nil { diff --git a/pkg/libcontainer/nsinit/mount.go b/pkg/libcontainer/nsinit/mount.go index dd6b1c8a43..e4869a0ecb 100644 --- a/pkg/libcontainer/nsinit/mount.go +++ b/pkg/libcontainer/nsinit/mount.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/security/restrict" "github.com/dotcloud/docker/pkg/system" "io/ioutil" "os" @@ -21,9 +22,9 @@ const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NOD // // There is no need to unmount the new mounts because as soon as the mount namespace // is no longer in use, the mounts will be removed automatically -func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, console string, readonly, noPivotRoot bool, mountLabel string) error { +func setupNewMountNamespace(rootfs, console string, container *libcontainer.Container) error { flag := syscall.MS_PRIVATE - if noPivotRoot { + if container.NoPivotRoot { flag = syscall.MS_SLAVE } if err := system.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil { @@ -32,44 +33,28 @@ func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, cons if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil { return fmt.Errorf("mouting %s as bind %s", rootfs, err) } - if err := mountSystem(rootfs, mountLabel); err != nil { + if err := mountSystem(rootfs, container.Context["mount_label"]); err != nil { return fmt.Errorf("mount system %s", err) } - - for _, m := range bindMounts { - var ( - flags = syscall.MS_BIND | syscall.MS_REC - dest = filepath.Join(rootfs, m.Destination) - ) - if !m.Writable { - flags = flags | syscall.MS_RDONLY - } - if err := system.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil { - return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err) - } - if !m.Writable { - if err := system.Mount(m.Source, dest, "bind", uintptr(flags|syscall.MS_REMOUNT), ""); err != nil { - return fmt.Errorf("remounting %s into %s %s", m.Source, dest, err) - } - } - if m.Private { - if err := system.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil { - return fmt.Errorf("mounting %s private %s", dest, err) - } + if err := setupBindmounts(rootfs, container.Mounts); err != nil { + return fmt.Errorf("bind mounts %s", err) + } + if restrictionPath := container.Context["restriction_path"]; 
restrictionPath != "" { + if err := restrict.Restrict(rootfs, restrictionPath); err != nil { + return fmt.Errorf("restrict %s", err) } } - if err := copyDevNodes(rootfs); err != nil { return fmt.Errorf("copy dev nodes %s", err) } - if err := setupPtmx(rootfs, console, mountLabel); err != nil { + if err := setupPtmx(rootfs, console, container.Context["mount_label"]); err != nil { return err } if err := system.Chdir(rootfs); err != nil { return fmt.Errorf("chdir into %s %s", rootfs, err) } - if noPivotRoot { + if container.NoPivotRoot { if err := rootMsMove(rootfs); err != nil { return err } @@ -79,7 +64,7 @@ func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, cons } } - if readonly { + if container.ReadonlyFs { if err := system.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, ""); err != nil { return fmt.Errorf("mounting %s as readonly %s", rootfs, err) } @@ -263,3 +248,29 @@ func remountSys() error { } return nil } + +func setupBindmounts(rootfs string, bindMounts []libcontainer.Mount) error { + for _, m := range bindMounts { + var ( + flags = syscall.MS_BIND | syscall.MS_REC + dest = filepath.Join(rootfs, m.Destination) + ) + if !m.Writable { + flags = flags | syscall.MS_RDONLY + } + if err := system.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil { + return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err) + } + if !m.Writable { + if err := system.Mount(m.Source, dest, "bind", uintptr(flags|syscall.MS_REMOUNT), ""); err != nil { + return fmt.Errorf("remounting %s into %s %s", m.Source, dest, err) + } + } + if m.Private { + if err := system.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil { + return fmt.Errorf("mounting %s private %s", dest, err) + } + } + } + return nil +} diff --git a/pkg/libcontainer/security/restrict/restrict.go b/pkg/libcontainer/security/restrict/restrict.go new file mode 100644 index 0000000000..d5c1dbbe26 --- /dev/null +++ b/pkg/libcontainer/security/restrict/restrict.go @@ -0,0 +1,46 @@ +package restrict + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/system" + "path/filepath" + "syscall" +) + +const flags = syscall.MS_BIND | syscall.MS_REC | syscall.MS_RDONLY + +var restrictions = map[string]string{ + // dirs + "/proc/sys": "", + "/proc/irq": "", + "/proc/acpi": "", + + // files + "/proc/sysrq-trigger": "/dev/null", + "/proc/kcore": "/dev/null", +} + +// Restrict locks down access to many areas of proc +// by using the asumption that the user does not have mount caps to +// revert the changes made here +func Restrict(rootfs, empty string) error { + for dest, source := range restrictions { + dest = filepath.Join(rootfs, dest) + + // we don't have a "/dev/null" for dirs so have the requester pass a dir + // for us to bind mount + switch source { + case "": + source = empty + default: + source = filepath.Join(rootfs, source) + } + if err := system.Mount(source, dest, "bind", flags, ""); err != nil { + return fmt.Errorf("unable to mount %s over %s %s", source, dest, err) + } + if err := system.Mount("", dest, "bind", flags|syscall.MS_REMOUNT, ""); err != nil { + return fmt.Errorf("unable to mount %s over %s %s", source, dest, err) + } + } + return nil +} From 0779a8c3287fbf7ff1938df10897b551b839cbee Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 10 Apr 2014 23:27:27 +0000 Subject: [PATCH 031/219] Add lxc support for restricting proc Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/execdriver/lxc/driver.go | 34 
+++++++++++++++++---------- daemon/execdriver/lxc/lxc_template.go | 9 ++++++- 2 files changed, 29 insertions(+), 14 deletions(-) diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index 1ebb73e807..1232d608a3 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -59,9 +59,10 @@ func init() { } type driver struct { - root string // root path for the driver to use - apparmor bool - sharedRoot bool + root string // root path for the driver to use + apparmor bool + sharedRoot bool + restrictionPath string } func NewDriver(root string, apparmor bool) (*driver, error) { @@ -69,10 +70,15 @@ func NewDriver(root string, apparmor bool) (*driver, error) { if err := linkLxcStart(root); err != nil { return nil, err } + restrictionPath := filepath.Join(root, "empty") + if err := os.MkdirAll(restrictionPath, 0700); err != nil { + return nil, err + } return &driver{ - apparmor: apparmor, - root: root, - sharedRoot: rootIsShared(), + apparmor: apparmor, + root: root, + sharedRoot: rootIsShared(), + restrictionPath: restrictionPath, }, nil } @@ -403,14 +409,16 @@ func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) { if err := LxcTemplateCompiled.Execute(fo, struct { *execdriver.Command - AppArmor bool - ProcessLabel string - MountLabel string + AppArmor bool + ProcessLabel string + MountLabel string + RestrictionSource string }{ - Command: c, - AppArmor: d.apparmor, - ProcessLabel: process, - MountLabel: mount, + Command: c, + AppArmor: d.apparmor, + ProcessLabel: process, + MountLabel: mount, + RestrictionSource: d.restrictionPath, }); err != nil { return "", err } diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go index f4cb3d19eb..25c227ef15 100644 --- a/daemon/execdriver/lxc/lxc_template.go +++ b/daemon/execdriver/lxc/lxc_template.go @@ -109,8 +109,15 @@ lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabS {{if .AppArmor}} lxc.aa_profile = unconfined {{else}} -#lxc.aa_profile = unconfined +# not unconfined {{end}} +{{else}} +# restrict access to proc +lxc.mount.entry = {{.RestrictionSource}} {{escapeFstabSpaces $ROOTFS}}/proc/sys none bind,ro 0 0 +lxc.mount.entry = {{.RestrictionSource}} {{escapeFstabSpaces $ROOTFS}}/proc/irq none bind,ro 0 0 +lxc.mount.entry = {{.RestrictionSource}} {{escapeFstabSpaces $ROOTFS}}/proc/acpi none bind,ro 0 0 +lxc.mount.entry = {{escapeFstabSpaces $ROOTFS}}/dev/null {{escapeFstabSpaces $ROOTFS}}/proc/sysrq-trigger none bind,ro 0 0 +lxc.mount.entry = {{escapeFstabSpaces $ROOTFS}}/dev/null {{escapeFstabSpaces $ROOTFS}}/proc/kcore none bind,ro 0 0 {{end}} # limits From 81e5026a6afb282589704fd5f6bcac9ed50108ea Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 11 Apr 2014 11:45:39 +0000 Subject: [PATCH 032/219] No not mount sysfs by default for non privilged containers Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/execdriver/lxc/lxc_template.go | 2 + daemon/execdriver/native/create.go | 11 +- integration-cli/docker_cli_run_test.go | 275 +------------------------ pkg/libcontainer/container.go | 27 ++- pkg/libcontainer/nsinit/mount.go | 42 ++-- 5 files changed, 67 insertions(+), 290 deletions(-) diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go index 25c227ef15..bc94e7a19d 100644 --- a/daemon/execdriver/lxc/lxc_template.go +++ b/daemon/execdriver/lxc/lxc_template.go @@ -88,7 +88,9 @@ lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc 
nosuid,nodev,noex # WARNING: sysfs is a known attack vector and should probably be disabled # if your userspace allows it. eg. see http://bit.ly/T9CkqJ +{{if .Privileged}} lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0 +{{end}} {{if .Tty}} lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0 diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index 1edbd17ad3..e26ff8d2b8 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -82,6 +82,9 @@ func (d *driver) setPrivileged(container *libcontainer.Container) error { c.Enabled = true } container.Cgroups.DeviceAccess = true + + // add sysfs as a mount for privileged containers + container.Mounts = append(container.Mounts, libcontainer.Mount{Type: "sysfs"}) delete(container.Context, "restriction_path") if apparmor.IsEnabled() { @@ -101,7 +104,13 @@ func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.C func (d *driver) setupMounts(container *libcontainer.Container, c *execdriver.Command) error { for _, m := range c.Mounts { - container.Mounts = append(container.Mounts, libcontainer.Mount{m.Source, m.Destination, m.Writable, m.Private}) + container.Mounts = append(container.Mounts, libcontainer.Mount{ + Type: "bind", + Source: m.Source, + Destination: m.Destination, + Writable: m.Writable, + Private: m.Private, + }) } return nil } diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index d356f5f4de..40781294ae 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -389,279 +389,24 @@ func TestMultipleVolumesFrom(t *testing.T) { logDone("run - multiple volumes from") } -// this tests verifies the ID format for the container -func TestVerifyContainerID(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") - out, exit, err := runCommandWithOutput(cmd) - if err != nil { - t.Fatal(err) - } - if exit != 0 { - t.Fatalf("expected exit code 0 received %d", exit) - } - match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n")) - if err != nil { - t.Fatal(err) - } - if !match { - t.Fatalf("Invalid container ID: %s", out) +func TestSysNotAvaliableInNonPrivilegedContainers(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "ls", "/sys/kernel") + if code, err := runCommand(cmd); err == nil || code == 0 { + t.Fatal("sys should not be available in a non privileged container") } deleteAllContainers() - logDone("run - verify container ID") + logDone("run - sys not avaliable in non privileged container") } -// Test that creating a container with a volume doesn't crash. Regression test for #995. 
-func TestCreateVolume(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "-v", "/var/lib/data", "busybox", "true") - if _, err := runCommand(cmd); err != nil { - t.Fatal(err) +func TestSysAvaliableInPrivilegedContainers(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "ls", "/sys/kernel") + if code, err := runCommand(cmd); err != nil || code != 0 { + t.Fatalf("sys should be available in privileged container") } deleteAllContainers() - logDone("run - create docker mangaed volume") -} - -func TestExitCode(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "busybox", "/bin/sh", "-c", "exit 72") - - exit, err := runCommand(cmd) - if err == nil { - t.Fatal("should not have a non nil error") - } - if exit != 72 { - t.Fatalf("expected exit code 72 received %d", exit) - } - - deleteAllContainers() - - logDone("run - correct exit code") -} - -func TestUserDefaultsToRoot(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "busybox", "id") - - out, _, err := runCommandWithOutput(cmd) - if err != nil { - t.Fatal(err, out) - } - if !strings.Contains(out, "uid=0(root) gid=0(root)") { - t.Fatalf("expected root user got %s", out) - } - deleteAllContainers() - - logDone("run - default user") -} - -func TestUserByName(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "-u", "root", "busybox", "id") - - out, _, err := runCommandWithOutput(cmd) - if err != nil { - t.Fatal(err, out) - } - if !strings.Contains(out, "uid=0(root) gid=0(root)") { - t.Fatalf("expected root user got %s", out) - } - deleteAllContainers() - - logDone("run - user by name") -} - -func TestUserByID(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "-u", "1", "busybox", "id") - - out, _, err := runCommandWithOutput(cmd) - if err != nil { - t.Fatal(err, out) - } - if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { - t.Fatalf("expected daemon user got %s", out) - } - deleteAllContainers() - - logDone("run - user by id") -} - -func TestUserNotFound(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "-u", "notme", "busybox", "id") - - _, err := runCommand(cmd) - if err == nil { - t.Fatal("unknown user should cause container to fail") - } - deleteAllContainers() - - logDone("run - user not found") -} - -func TestRunTwoConcurrentContainers(t *testing.T) { - group := sync.WaitGroup{} - group.Add(2) - - for i := 0; i < 2; i++ { - go func() { - defer group.Done() - cmd := exec.Command(dockerBinary, "run", "busybox", "sleep", "2") - if _, err := runCommand(cmd); err != nil { - t.Fatal(err) - } - }() - } - - group.Wait() - - deleteAllContainers() - - logDone("run - two concurrent containers") -} - -func TestEnvironment(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "busybox", "env") - cmd.Env = append(os.Environ(), - "TRUE=false", - "TRICKY=tri\ncky\n", - ) - - out, _, err := runCommandWithOutput(cmd) - if err != nil { - t.Fatal(err, out) - } - - actualEnv := strings.Split(out, "\n") - if actualEnv[len(actualEnv)-1] == "" { - actualEnv = actualEnv[:len(actualEnv)-1] - } - sort.Strings(actualEnv) - - goodEnv := []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "HOME=/", - "HOSTNAME=testing", - "FALSE=true", - "TRUE=false", - "TRICKY=tri", - "cky", - "", - } - sort.Strings(goodEnv) - if len(goodEnv) != len(actualEnv) { - t.Fatalf("Wrong environment: should be %d variables, not: '%s'\n", len(goodEnv), strings.Join(actualEnv, ", ")) - } - for i := range 
goodEnv { - if actualEnv[i] != goodEnv[i] { - t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) - } - } - - deleteAllContainers() - - logDone("run - verify environment") -} - -func TestContainerNetwork(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "busybox", "ping", "-c", "1", "127.0.0.1") - if _, err := runCommand(cmd); err != nil { - t.Fatal(err) - } - - deleteAllContainers() - - logDone("run - test container network via ping") -} - -// Issue #4681 -func TestLoopbackWhenNetworkDisabled(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "--networking=false", "busybox", "ping", "-c", "1", "127.0.0.1") - if _, err := runCommand(cmd); err != nil { - t.Fatal(err) - } - - deleteAllContainers() - - logDone("run - test container loopback when networking disabled") -} - -func TestLoopbackOnlyExistsWhenNetworkingDisabled(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "--networking=false", "busybox", "ip", "a", "show", "up") - out, _, err := runCommandWithOutput(cmd) - if err != nil { - t.Fatal(err, out) - } - - interfaces := regexp.MustCompile(`(?m)^[0-9]+: [a-zA-Z0-9]+`).FindAllString(out, -1) - if len(interfaces) != 1 { - t.Fatalf("Wrong interface count in test container: expected [*: lo], got %s", interfaces) - } - if !strings.HasSuffix(interfaces[0], ": lo") { - t.Fatalf("Wrong interface in test container: expected [*: lo], got %s", interfaces) - } - - deleteAllContainers() - - logDone("run - test loopback only exists when networking disabled") -} - -func TestPrivilegedCanMknod(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") - out, _, err := runCommandWithOutput(cmd) - if err != nil { - t.Fatal(err) - } - - if actual := strings.Trim(out, "\r\n"); actual != "ok" { - t.Fatalf("expected output ok received %s", actual) - } - deleteAllContainers() - - logDone("run - test privileged can mknod") -} - -func TestUnPrivilegedCanMknod(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") - out, _, err := runCommandWithOutput(cmd) - if err != nil { - t.Fatal(err) - } - - if actual := strings.Trim(out, "\r\n"); actual != "ok" { - t.Fatalf("expected output ok received %s", actual) - } - deleteAllContainers() - - logDone("run - test un-privileged can mknod") -} - -func TestPrivilegedCanMount(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") - - out, _, err := runCommandWithOutput(cmd) - if err != nil { - t.Fatal(err) - } - - if actual := strings.Trim(out, "\r\n"); actual != "ok" { - t.Fatalf("expected output ok received %s", actual) - } - deleteAllContainers() - - logDone("run - test privileged can mount") -} - -func TestUnPrivilegedCannotMount(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") - - out, _, err := runCommandWithOutput(cmd) - if err == nil { - t.Fatal(err, out) - } - - if actual := strings.Trim(out, "\r\n"); actual == "ok" { - t.Fatalf("expected output not ok received %s", actual) - } - deleteAllContainers() - - logDone("run - test un-privileged cannot mount") + logDone("run - sys avaliable in privileged container") } diff --git a/pkg/libcontainer/container.go b/pkg/libcontainer/container.go index c7cac35428..1e032c0642 100644 --- a/pkg/libcontainer/container.go +++ b/pkg/libcontainer/container.go @@ -23,7 +23,7 @@ type 
Container struct { Networks []*Network `json:"networks,omitempty"` // nil for host's network stack Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` // cgroups Context Context `json:"context,omitempty"` // generic context for specific options (apparmor, selinux) - Mounts []Mount `json:"mounts,omitempty"` + Mounts Mounts `json:"mounts,omitempty"` } // Network defines configuration for a container's networking stack @@ -38,11 +38,22 @@ type Network struct { Mtu int `json:"mtu,omitempty"` } -// Bind mounts from the host system to the container -// -type Mount struct { - Source string `json:"source"` // Source path, in the host namespace - Destination string `json:"destination"` // Destination path, in the container - Writable bool `json:"writable"` - Private bool `json:"private"` +type Mounts []Mount + +func (s Mounts) OfType(t string) Mounts { + out := Mounts{} + for _, m := range s { + if m.Type == t { + out = append(out, m) + } + } + return out +} + +type Mount struct { + Type string `json:"type,omitempty"` + Source string `json:"source,omitempty"` // Source path, in the host namespace + Destination string `json:"destination,omitempty"` // Destination path, in the container + Writable bool `json:"writable,omitempty"` + Private bool `json:"private,omitempty"` } diff --git a/pkg/libcontainer/nsinit/mount.go b/pkg/libcontainer/nsinit/mount.go index e4869a0ecb..ee480328c0 100644 --- a/pkg/libcontainer/nsinit/mount.go +++ b/pkg/libcontainer/nsinit/mount.go @@ -17,6 +17,14 @@ import ( // default mount point flags const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV +type mount struct { + source string + path string + device string + flags int + data string +} + // setupNewMountNamespace is used to initialize a new mount namespace for an new // container in the rootfs that is specified. 
// @@ -33,7 +41,7 @@ func setupNewMountNamespace(rootfs, console string, container *libcontainer.Cont if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil { return fmt.Errorf("mouting %s as bind %s", rootfs, err) } - if err := mountSystem(rootfs, container.Context["mount_label"]); err != nil { + if err := mountSystem(rootfs, container); err != nil { return fmt.Errorf("mount system %s", err) } if err := setupBindmounts(rootfs, container.Mounts); err != nil { @@ -183,19 +191,8 @@ func setupConsole(rootfs, console string, mountLabel string) error { // mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts // inside the mount namespace -func mountSystem(rootfs string, mountLabel string) error { - for _, m := range []struct { - source string - path string - device string - flags int - data string - }{ - {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, - {source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags}, - {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)}, - {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, - } { +func mountSystem(rootfs string, container *libcontainer.Container) error { + for _, m := range newSystemMounts(rootfs, container.Context["mount_label"], container.Mounts) { if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) { return fmt.Errorf("mkdirall %s %s", m.path, err) } @@ -249,8 +246,8 @@ func remountSys() error { return nil } -func setupBindmounts(rootfs string, bindMounts []libcontainer.Mount) error { - for _, m := range bindMounts { +func setupBindmounts(rootfs string, bindMounts libcontainer.Mounts) error { + for _, m := range bindMounts.OfType("bind") { var ( flags = syscall.MS_BIND | syscall.MS_REC dest = filepath.Join(rootfs, m.Destination) @@ -274,3 +271,16 @@ func setupBindmounts(rootfs string, bindMounts []libcontainer.Mount) error { } return nil } + +func newSystemMounts(rootfs, mountLabel string, mounts libcontainer.Mounts) []mount { + systemMounts := []mount{ + {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, + {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)}, + {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, + } + + if len(mounts.OfType("sysfs")) == 1 { + systemMounts = append(systemMounts, mount{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags}) + } + return systemMounts +} From 5ba1242bdc309352c2b0b9a1ef9e07fe835e4857 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 11 Apr 2014 14:30:09 +0000 Subject: [PATCH 033/219] Mount over dev and only copy allowed nodes in Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/execdriver/native/create.go | 2 ++ pkg/libcontainer/nsinit/mount.go | 18 +++++++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/daemon/execdriver/native/create.go 
b/daemon/execdriver/native/create.go index e26ff8d2b8..76d65bd15c 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -34,6 +34,8 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container if err := d.setPrivileged(container); err != nil { return nil, err } + } else { + container.Mounts = append(container.Mounts, libcontainer.Mount{Type: "devtmpfs"}) } if err := d.setupCgroups(container, c); err != nil { return nil, err diff --git a/pkg/libcontainer/nsinit/mount.go b/pkg/libcontainer/nsinit/mount.go index ee480328c0..81229ef2d3 100644 --- a/pkg/libcontainer/nsinit/mount.go +++ b/pkg/libcontainer/nsinit/mount.go @@ -47,14 +47,14 @@ func setupNewMountNamespace(rootfs, console string, container *libcontainer.Cont if err := setupBindmounts(rootfs, container.Mounts); err != nil { return fmt.Errorf("bind mounts %s", err) } + if err := copyDevNodes(rootfs); err != nil { + return fmt.Errorf("copy dev nodes %s", err) + } if restrictionPath := container.Context["restriction_path"]; restrictionPath != "" { if err := restrict.Restrict(rootfs, restrictionPath); err != nil { return fmt.Errorf("restrict %s", err) } } - if err := copyDevNodes(rootfs); err != nil { - return fmt.Errorf("copy dev nodes %s", err) - } if err := setupPtmx(rootfs, console, container.Context["mount_label"]); err != nil { return err } @@ -273,12 +273,20 @@ func setupBindmounts(rootfs string, bindMounts libcontainer.Mounts) error { } func newSystemMounts(rootfs, mountLabel string, mounts libcontainer.Mounts) []mount { - systemMounts := []mount{ - {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, + devMounts := []mount{ {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)}, {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, } + systemMounts := []mount{ + {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, + } + + if len(mounts.OfType("devtmpfs")) == 1 { + systemMounts = append(systemMounts, mount{source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: "mode=755"}) + } + systemMounts = append(systemMounts, devMounts...) 
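The "devtmpfs" entry appended in create.go above is only a marker: when newSystemMounts sees it, a fresh tmpfs is mounted over the container's /dev before the allowed device nodes are copied back in. A rough standalone sketch of that single mount, with a hypothetical rootfs path (Linux only, and it needs root to actually succeed):

```go
package main

import (
	"fmt"
	"path/filepath"
	"syscall"
)

func main() {
	rootfs := "/tmp/rootfs" // hypothetical container rootfs
	dev := filepath.Join(rootfs, "dev")
	// Same flags and data as the mount entry added above: nosuid, strictatime, mode=755.
	flags := uintptr(syscall.MS_NOSUID | syscall.MS_STRICTATIME)
	if err := syscall.Mount("tmpfs", dev, "tmpfs", flags, "mode=755"); err != nil {
		fmt.Println("mount tmpfs on", dev, "failed:", err)
	}
}
```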
+ if len(mounts.OfType("sysfs")) == 1 { systemMounts = append(systemMounts, mount{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags}) } From de3d51b0a824e31d7e245aed958d53f436456699 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 11 Apr 2014 14:42:53 +0000 Subject: [PATCH 034/219] Move console into its own package Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/console/console.go | 46 +++++++++++++++++++++++++++++ pkg/libcontainer/nsinit/mount.go | 41 +++---------------------- 2 files changed, 50 insertions(+), 37 deletions(-) create mode 100644 pkg/libcontainer/console/console.go diff --git a/pkg/libcontainer/console/console.go b/pkg/libcontainer/console/console.go new file mode 100644 index 0000000000..deee544184 --- /dev/null +++ b/pkg/libcontainer/console/console.go @@ -0,0 +1,46 @@ +// +build linux + +package console + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/label" + "github.com/dotcloud/docker/pkg/system" + "os" + "path/filepath" + "syscall" +) + +// Setup initializes the proper /dev/console inside the rootfs path +func Setup(rootfs, consolePath, mountLabel string) error { + oldMask := system.Umask(0000) + defer system.Umask(oldMask) + + stat, err := os.Stat(consolePath) + if err != nil { + return fmt.Errorf("stat console %s %s", consolePath, err) + } + var ( + st = stat.Sys().(*syscall.Stat_t) + dest = filepath.Join(rootfs, "dev/console") + ) + if err := os.Remove(dest); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("remove %s %s", dest, err) + } + if err := os.Chmod(consolePath, 0600); err != nil { + return err + } + if err := os.Chown(consolePath, 0, 0); err != nil { + return err + } + if err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil { + return fmt.Errorf("mknod %s %s", dest, err) + } + if err := label.SetFileLabel(consolePath, mountLabel); err != nil { + return fmt.Errorf("set file label %s %s", dest, err) + } + if err := system.Mount(consolePath, dest, "bind", syscall.MS_BIND, ""); err != nil { + return fmt.Errorf("bind %s to %s %s", consolePath, dest, err) + } + return nil +} diff --git a/pkg/libcontainer/nsinit/mount.go b/pkg/libcontainer/nsinit/mount.go index 81229ef2d3..c85058a9f6 100644 --- a/pkg/libcontainer/nsinit/mount.go +++ b/pkg/libcontainer/nsinit/mount.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/console" "github.com/dotcloud/docker/pkg/libcontainer/security/restrict" "github.com/dotcloud/docker/pkg/system" "io/ioutil" @@ -155,40 +156,6 @@ func copyDevNode(rootfs, node string) error { return nil } -// setupConsole ensures that the container has a proper /dev/console setup -func setupConsole(rootfs, console string, mountLabel string) error { - oldMask := system.Umask(0000) - defer system.Umask(oldMask) - - stat, err := os.Stat(console) - if err != nil { - return fmt.Errorf("stat console %s %s", console, err) - } - var ( - st = stat.Sys().(*syscall.Stat_t) - dest = filepath.Join(rootfs, "dev/console") - ) - if err := os.Remove(dest); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("remove %s %s", dest, err) - } - if err := os.Chmod(console, 0600); err != nil { - return err - } - if err := os.Chown(console, 0, 0); err != nil { - return err - } - if err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil { - return fmt.Errorf("mknod %s %s", dest, err) - } - if err := 
label.SetFileLabel(console, mountLabel); err != nil { - return fmt.Errorf("SetFileLabel Failed %s %s", dest, err) - } - if err := system.Mount(console, dest, "bind", syscall.MS_BIND, ""); err != nil { - return fmt.Errorf("bind %s to %s %s", console, dest, err) - } - return nil -} - // mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts // inside the mount namespace func mountSystem(rootfs string, container *libcontainer.Container) error { @@ -205,7 +172,7 @@ func mountSystem(rootfs string, container *libcontainer.Container) error { // setupPtmx adds a symlink to pts/ptmx for /dev/ptmx and // finishes setting up /dev/console -func setupPtmx(rootfs, console string, mountLabel string) error { +func setupPtmx(rootfs, consolePath, mountLabel string) error { ptmx := filepath.Join(rootfs, "dev/ptmx") if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) { return err @@ -213,8 +180,8 @@ func setupPtmx(rootfs, console string, mountLabel string) error { if err := os.Symlink("pts/ptmx", ptmx); err != nil { return fmt.Errorf("symlink dev ptmx %s", err) } - if console != "" { - if err := setupConsole(rootfs, console, mountLabel); err != nil { + if consolePath != "" { + if err := console.Setup(rootfs, consolePath, mountLabel); err != nil { return err } } From 05b611574f85c7ff7d479e04e01ac2b57b233591 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 11 Apr 2014 15:06:56 +0000 Subject: [PATCH 035/219] Refactor mounts into pkg to make changes easier Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- .../{nsinit/mount.go => mount/init.go} | 153 +++--------------- pkg/libcontainer/mount/msmoveroot.go | 19 +++ pkg/libcontainer/mount/nodes/nodes.go | 49 ++++++ pkg/libcontainer/mount/pivotroot.go | 31 ++++ pkg/libcontainer/mount/ptmx.go | 26 +++ pkg/libcontainer/mount/readonly.go | 12 ++ pkg/libcontainer/mount/remount.go | 31 ++++ pkg/libcontainer/nsinit/execin.go | 5 +- pkg/libcontainer/nsinit/init.go | 3 +- 9 files changed, 191 insertions(+), 138 deletions(-) rename pkg/libcontainer/{nsinit/mount.go => mount/init.go} (51%) create mode 100644 pkg/libcontainer/mount/msmoveroot.go create mode 100644 pkg/libcontainer/mount/nodes/nodes.go create mode 100644 pkg/libcontainer/mount/pivotroot.go create mode 100644 pkg/libcontainer/mount/ptmx.go create mode 100644 pkg/libcontainer/mount/readonly.go create mode 100644 pkg/libcontainer/mount/remount.go diff --git a/pkg/libcontainer/nsinit/mount.go b/pkg/libcontainer/mount/init.go similarity index 51% rename from pkg/libcontainer/nsinit/mount.go rename to pkg/libcontainer/mount/init.go index c85058a9f6..2a5e47a4a4 100644 --- a/pkg/libcontainer/nsinit/mount.go +++ b/pkg/libcontainer/mount/init.go @@ -1,15 +1,14 @@ // +build linux -package nsinit +package mount import ( "fmt" "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/console" + "github.com/dotcloud/docker/pkg/libcontainer/mount/nodes" "github.com/dotcloud/docker/pkg/libcontainer/security/restrict" "github.com/dotcloud/docker/pkg/system" - "io/ioutil" "os" "path/filepath" "syscall" @@ -26,13 +25,13 @@ type mount struct { data string } -// setupNewMountNamespace is used to initialize a new mount namespace for an new -// container in the rootfs that is specified. 
-// -// There is no need to unmount the new mounts because as soon as the mount namespace -// is no longer in use, the mounts will be removed automatically -func setupNewMountNamespace(rootfs, console string, container *libcontainer.Container) error { - flag := syscall.MS_PRIVATE +// InitializeMountNamespace setups up the devices, mount points, and filesystems for use inside a +// new mount namepsace +func InitializeMountNamespace(rootfs, console string, container *libcontainer.Container) error { + var ( + err error + flag = syscall.MS_PRIVATE + ) if container.NoPivotRoot { flag = syscall.MS_SLAVE } @@ -48,7 +47,7 @@ func setupNewMountNamespace(rootfs, console string, container *libcontainer.Cont if err := setupBindmounts(rootfs, container.Mounts); err != nil { return fmt.Errorf("bind mounts %s", err) } - if err := copyDevNodes(rootfs); err != nil { + if err := nodes.CopyN(rootfs, nodes.DefaultNodes); err != nil { return fmt.Errorf("copy dev nodes %s", err) } if restrictionPath := container.Context["restriction_path"]; restrictionPath != "" { @@ -56,7 +55,7 @@ func setupNewMountNamespace(rootfs, console string, container *libcontainer.Cont return fmt.Errorf("restrict %s", err) } } - if err := setupPtmx(rootfs, console, container.Context["mount_label"]); err != nil { + if err := SetupPtmx(rootfs, console, container.Context["mount_label"]); err != nil { return err } if err := system.Chdir(rootfs); err != nil { @@ -64,18 +63,17 @@ func setupNewMountNamespace(rootfs, console string, container *libcontainer.Cont } if container.NoPivotRoot { - if err := rootMsMove(rootfs); err != nil { - return err - } + err = MsMoveRoot(rootfs) } else { - if err := rootPivot(rootfs); err != nil { - return err - } + err = PivotRoot(rootfs) + } + if err != nil { + return err } if container.ReadonlyFs { - if err := system.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, ""); err != nil { - return fmt.Errorf("mounting %s as readonly %s", rootfs, err) + if err := SetReadonly(); err != nil { + return fmt.Errorf("set readonly %s", err) } } @@ -84,78 +82,6 @@ func setupNewMountNamespace(rootfs, console string, container *libcontainer.Cont return nil } -// use a pivot root to setup the rootfs -func rootPivot(rootfs string) error { - pivotDir, err := ioutil.TempDir(rootfs, ".pivot_root") - if err != nil { - return fmt.Errorf("can't create pivot_root dir %s", pivotDir, err) - } - if err := system.Pivotroot(rootfs, pivotDir); err != nil { - return fmt.Errorf("pivot_root %s", err) - } - if err := system.Chdir("/"); err != nil { - return fmt.Errorf("chdir / %s", err) - } - // path to pivot dir now changed, update - pivotDir = filepath.Join("/", filepath.Base(pivotDir)) - if err := system.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { - return fmt.Errorf("unmount pivot_root dir %s", err) - } - if err := os.Remove(pivotDir); err != nil { - return fmt.Errorf("remove pivot_root dir %s", err) - } - return nil -} - -// use MS_MOVE and chroot to setup the rootfs -func rootMsMove(rootfs string) error { - if err := system.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil { - return fmt.Errorf("mount move %s into / %s", rootfs, err) - } - if err := system.Chroot("."); err != nil { - return fmt.Errorf("chroot . 
%s", err) - } - if err := system.Chdir("/"); err != nil { - return fmt.Errorf("chdir / %s", err) - } - return nil -} - -// copyDevNodes mknods the hosts devices so the new container has access to them -func copyDevNodes(rootfs string) error { - oldMask := system.Umask(0000) - defer system.Umask(oldMask) - - for _, node := range []string{ - "null", - "zero", - "full", - "random", - "urandom", - "tty", - } { - if err := copyDevNode(rootfs, node); err != nil { - return err - } - } - return nil -} - -func copyDevNode(rootfs, node string) error { - stat, err := os.Stat(filepath.Join("/dev", node)) - if err != nil { - return err - } - var ( - dest = filepath.Join(rootfs, "dev", node) - st = stat.Sys().(*syscall.Stat_t) - ) - if err := system.Mknod(dest, st.Mode, int(st.Rdev)); err != nil && !os.IsExist(err) { - return fmt.Errorf("copy %s %s", node, err) - } - return nil -} - // mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts // inside the mount namespace func mountSystem(rootfs string, container *libcontainer.Container) error { @@ -170,49 +96,6 @@ func mountSystem(rootfs string, container *libcontainer.Container) error { return nil } -// setupPtmx adds a symlink to pts/ptmx for /dev/ptmx and -// finishes setting up /dev/console -func setupPtmx(rootfs, consolePath, mountLabel string) error { - ptmx := filepath.Join(rootfs, "dev/ptmx") - if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) { - return err - } - if err := os.Symlink("pts/ptmx", ptmx); err != nil { - return fmt.Errorf("symlink dev ptmx %s", err) - } - if consolePath != "" { - if err := console.Setup(rootfs, consolePath, mountLabel); err != nil { - return err - } - } - return nil -} - -// remountProc is used to detach and remount the proc filesystem -// commonly needed with running a new process inside an existing container -func remountProc() error { - if err := system.Unmount("/proc", syscall.MNT_DETACH); err != nil { - return err - } - if err := system.Mount("proc", "/proc", "proc", uintptr(defaultMountFlags), ""); err != nil { - return err - } - return nil -} - -func remountSys() error { - if err := system.Unmount("/sys", syscall.MNT_DETACH); err != nil { - if err != syscall.EINVAL { - return err - } - } else { - if err := system.Mount("sysfs", "/sys", "sysfs", uintptr(defaultMountFlags), ""); err != nil { - return err - } - } - return nil -} - func setupBindmounts(rootfs string, bindMounts libcontainer.Mounts) error { for _, m := range bindMounts.OfType("bind") { var ( diff --git a/pkg/libcontainer/mount/msmoveroot.go b/pkg/libcontainer/mount/msmoveroot.go new file mode 100644 index 0000000000..b336c86495 --- /dev/null +++ b/pkg/libcontainer/mount/msmoveroot.go @@ -0,0 +1,19 @@ +// +build linux + +package mount + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/system" + "syscall" +) + +func MsMoveRoot(rootfs string) error { + if err := system.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil { + return fmt.Errorf("mount move %s into / %s", rootfs, err) + } + if err := system.Chroot("."); err != nil { + return fmt.Errorf("chroot . 
%s", err) + } + return system.Chdir("/") +} diff --git a/pkg/libcontainer/mount/nodes/nodes.go b/pkg/libcontainer/mount/nodes/nodes.go new file mode 100644 index 0000000000..5022f85b0b --- /dev/null +++ b/pkg/libcontainer/mount/nodes/nodes.go @@ -0,0 +1,49 @@ +// +build linux + +package nodes + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/system" + "os" + "path/filepath" + "syscall" +) + +// Default list of device nodes to copy +var DefaultNodes = []string{ + "null", + "zero", + "full", + "random", + "urandom", + "tty", +} + +// CopyN copies the device node from the host into the rootfs +func CopyN(rootfs string, nodesToCopy []string) error { + oldMask := system.Umask(0000) + defer system.Umask(oldMask) + + for _, node := range nodesToCopy { + if err := Copy(rootfs, node); err != nil { + return err + } + } + return nil +} + +func Copy(rootfs, node string) error { + stat, err := os.Stat(filepath.Join("/dev", node)) + if err != nil { + return err + } + var ( + dest = filepath.Join(rootfs, "dev", node) + st = stat.Sys().(*syscall.Stat_t) + ) + if err := system.Mknod(dest, st.Mode, int(st.Rdev)); err != nil && !os.IsExist(err) { + return fmt.Errorf("copy %s %s", node, err) + } + return nil +} diff --git a/pkg/libcontainer/mount/pivotroot.go b/pkg/libcontainer/mount/pivotroot.go new file mode 100644 index 0000000000..447f5904b2 --- /dev/null +++ b/pkg/libcontainer/mount/pivotroot.go @@ -0,0 +1,31 @@ +// +build linux + +package mount + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/system" + "io/ioutil" + "os" + "path/filepath" + "syscall" +) + +func PivotRoot(rootfs string) error { + pivotDir, err := ioutil.TempDir(rootfs, ".pivot_root") + if err != nil { + return fmt.Errorf("can't create pivot_root dir %s", pivotDir, err) + } + if err := system.Pivotroot(rootfs, pivotDir); err != nil { + return fmt.Errorf("pivot_root %s", err) + } + if err := system.Chdir("/"); err != nil { + return fmt.Errorf("chdir / %s", err) + } + // path to pivot dir now changed, update + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + if err := system.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { + return fmt.Errorf("unmount pivot_root dir %s", err) + } + return os.Remove(pivotDir) +} diff --git a/pkg/libcontainer/mount/ptmx.go b/pkg/libcontainer/mount/ptmx.go new file mode 100644 index 0000000000..f6ca534637 --- /dev/null +++ b/pkg/libcontainer/mount/ptmx.go @@ -0,0 +1,26 @@ +// +build linux + +package mount + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/libcontainer/console" + "os" + "path/filepath" +) + +func SetupPtmx(rootfs, consolePath, mountLabel string) error { + ptmx := filepath.Join(rootfs, "dev/ptmx") + if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) { + return err + } + if err := os.Symlink("pts/ptmx", ptmx); err != nil { + return fmt.Errorf("symlink dev ptmx %s", err) + } + if consolePath != "" { + if err := console.Setup(rootfs, consolePath, mountLabel); err != nil { + return err + } + } + return nil +} diff --git a/pkg/libcontainer/mount/readonly.go b/pkg/libcontainer/mount/readonly.go new file mode 100644 index 0000000000..0658358ad6 --- /dev/null +++ b/pkg/libcontainer/mount/readonly.go @@ -0,0 +1,12 @@ +// +build linux + +package mount + +import ( + "github.com/dotcloud/docker/pkg/system" + "syscall" +) + +func SetReadonly() error { + return system.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "") +} diff --git a/pkg/libcontainer/mount/remount.go b/pkg/libcontainer/mount/remount.go new file mode 100644 index 
0000000000..3e00509ae0 --- /dev/null +++ b/pkg/libcontainer/mount/remount.go @@ -0,0 +1,31 @@ +// +build linux + +package mount + +import ( + "github.com/dotcloud/docker/pkg/system" + "syscall" +) + +func RemountProc() error { + if err := system.Unmount("/proc", syscall.MNT_DETACH); err != nil { + return err + } + if err := system.Mount("proc", "/proc", "proc", uintptr(defaultMountFlags), ""); err != nil { + return err + } + return nil +} + +func RemountSys() error { + if err := system.Unmount("/sys", syscall.MNT_DETACH); err != nil { + if err != syscall.EINVAL { + return err + } + } else { + if err := system.Mount("sysfs", "/sys", "sysfs", uintptr(defaultMountFlags), ""); err != nil { + return err + } + } + return nil +} diff --git a/pkg/libcontainer/nsinit/execin.go b/pkg/libcontainer/nsinit/execin.go index 9017af06e9..b79881015f 100644 --- a/pkg/libcontainer/nsinit/execin.go +++ b/pkg/libcontainer/nsinit/execin.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/mount" "github.com/dotcloud/docker/pkg/system" "os" "path/filepath" @@ -63,10 +64,10 @@ func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []s if err := system.Unshare(syscall.CLONE_NEWNS); err != nil { return -1, err } - if err := remountProc(); err != nil { + if err := mount.RemountProc(); err != nil { return -1, fmt.Errorf("remount proc %s", err) } - if err := remountSys(); err != nil { + if err := mount.RemountSys(); err != nil { return -1, fmt.Errorf("remount sys %s", err) } goto dropAndExec diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index fb3a895a78..6e6b0e5a8e 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -12,6 +12,7 @@ import ( "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/capabilities" + "github.com/dotcloud/docker/pkg/libcontainer/mount" "github.com/dotcloud/docker/pkg/libcontainer/network" "github.com/dotcloud/docker/pkg/libcontainer/security/apparmor" "github.com/dotcloud/docker/pkg/libcontainer/utils" @@ -61,7 +62,7 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol label.Init() ns.logger.Println("setup mount namespace") - if err := setupNewMountNamespace(rootfs, console, container); err != nil { + if err := mount.InitializeMountNamespace(rootfs, console, container); err != nil { return fmt.Errorf("setup mount namespace %s", err) } if err := system.Sethostname(container.Hostname); err != nil { From a949d39f195e7b87288b10b0ef31843e6a3d8eb0 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 11 Apr 2014 15:15:28 +0000 Subject: [PATCH 036/219] Move rest of console functions to pkg Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/console/console.go | 14 ++++++++++++ pkg/libcontainer/nsinit/init.go | 34 +++++++---------------------- 2 files changed, 22 insertions(+), 26 deletions(-) diff --git a/pkg/libcontainer/console/console.go b/pkg/libcontainer/console/console.go index deee544184..05cd08a92e 100644 --- a/pkg/libcontainer/console/console.go +++ b/pkg/libcontainer/console/console.go @@ -44,3 +44,17 @@ func Setup(rootfs, consolePath, mountLabel string) error { } return nil } + +func OpenAndDup(consolePath string) error { + slave, err := system.OpenTerminal(consolePath, syscall.O_RDWR) + if err != nil { + return fmt.Errorf("open terminal %s", err) 
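The OpenAndDup helper being added here does nothing more than point file descriptors 0, 1 and 2 of the init process at the console's pty slave. A minimal illustration of the same dup2-onto-stdio pattern, using /dev/null as a hypothetical stand-in for the slave (linux/amd64, where syscall.Dup2 is available):

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	// Stand-in for the pty slave that system.OpenTerminal returns in OpenAndDup.
	f, err := os.OpenFile("/dev/null", os.O_RDWR, 0)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Redirect stdin, stdout and stderr to the opened file, as OpenAndDup does
	// with the console before the container's command is executed.
	for _, fd := range []int{0, 1, 2} {
		if err := syscall.Dup2(int(f.Fd()), fd); err != nil {
			fmt.Println("dup2:", err)
			return
		}
	}
	fmt.Println("this line now ends up in /dev/null")
}
```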
+ } + if err := system.Dup2(slave.Fd(), 0); err != nil { + return err + } + if err := system.Dup2(slave.Fd(), 1); err != nil { + return err + } + return system.Dup2(slave.Fd(), 2) +} diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 6e6b0e5a8e..9aac9a40eb 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -12,6 +12,7 @@ import ( "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/capabilities" + "github.com/dotcloud/docker/pkg/libcontainer/console" "github.com/dotcloud/docker/pkg/libcontainer/mount" "github.com/dotcloud/docker/pkg/libcontainer/network" "github.com/dotcloud/docker/pkg/libcontainer/security/apparmor" @@ -22,7 +23,7 @@ import ( // Init is the init process that first runs inside a new namespace to setup mounts, users, networking, // and other options required for the new container. -func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error { +func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consolePath string, syncPipe *SyncPipe, args []string) error { rootfs, err := utils.ResolveRootfs(uncleanRootfs) if err != nil { return err @@ -38,20 +39,16 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol ns.logger.Println("received context from parent") syncPipe.Close() - if console != "" { - ns.logger.Printf("setting up %s as console\n", console) - slave, err := system.OpenTerminal(console, syscall.O_RDWR) - if err != nil { - return fmt.Errorf("open terminal %s", err) - } - if err := dupSlave(slave); err != nil { - return fmt.Errorf("dup2 slave %s", err) + if consolePath != "" { + ns.logger.Printf("setting up %s as console\n", consolePath) + if err := console.OpenAndDup(consolePath); err != nil { + return err } } if _, err := system.Setsid(); err != nil { return fmt.Errorf("setsid %s", err) } - if console != "" { + if consolePath != "" { if err := system.Setctty(); err != nil { return fmt.Errorf("setctty %s", err) } @@ -62,7 +59,7 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol label.Init() ns.logger.Println("setup mount namespace") - if err := mount.InitializeMountNamespace(rootfs, console, container); err != nil { + if err := mount.InitializeMountNamespace(rootfs, consolePath, container); err != nil { return fmt.Errorf("setup mount namespace %s", err) } if err := system.Sethostname(container.Hostname); err != nil { @@ -116,21 +113,6 @@ func setupUser(container *libcontainer.Container) error { return nil } -// dupSlave dup2 the pty slave's fd into stdout and stdin and ensures that -// the slave's fd is 0, or stdin -func dupSlave(slave *os.File) error { - if err := system.Dup2(slave.Fd(), 0); err != nil { - return err - } - if err := system.Dup2(slave.Fd(), 1); err != nil { - return err - } - if err := system.Dup2(slave.Fd(), 2); err != nil { - return err - } - return nil -} - // setupVethNetwork uses the Network config if it is not nil to initialize // the new veth interface inside the container for use by changing the name to eth0 // setting the MTU and IP address along with the default gateway From 156987c118f6f4067794e09e90aabeee0002d05c Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 11 Apr 2014 15:26:23 +0000 Subject: [PATCH 037/219] Move mounts into types.go Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/container.go | 
20 -------------------- pkg/libcontainer/mount/init.go | 11 +++++------ pkg/libcontainer/types.go | 20 ++++++++++++++++++++ 3 files changed, 25 insertions(+), 26 deletions(-) diff --git a/pkg/libcontainer/container.go b/pkg/libcontainer/container.go index 1e032c0642..ddcc6cab70 100644 --- a/pkg/libcontainer/container.go +++ b/pkg/libcontainer/container.go @@ -37,23 +37,3 @@ type Network struct { Gateway string `json:"gateway,omitempty"` Mtu int `json:"mtu,omitempty"` } - -type Mounts []Mount - -func (s Mounts) OfType(t string) Mounts { - out := Mounts{} - for _, m := range s { - if m.Type == t { - out = append(out, m) - } - } - return out -} - -type Mount struct { - Type string `json:"type,omitempty"` - Source string `json:"source,omitempty"` // Source path, in the host namespace - Destination string `json:"destination,omitempty"` // Destination path, in the container - Writable bool `json:"writable,omitempty"` - Private bool `json:"private,omitempty"` -} diff --git a/pkg/libcontainer/mount/init.go b/pkg/libcontainer/mount/init.go index 2a5e47a4a4..06b2c82f56 100644 --- a/pkg/libcontainer/mount/init.go +++ b/pkg/libcontainer/mount/init.go @@ -122,12 +122,9 @@ func setupBindmounts(rootfs string, bindMounts libcontainer.Mounts) error { return nil } +// TODO: this is crappy right now and should be cleaned up with a better way of handling system and +// standard bind mounts allowing them to be more dymanic func newSystemMounts(rootfs, mountLabel string, mounts libcontainer.Mounts) []mount { - devMounts := []mount{ - {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)}, - {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, - } - systemMounts := []mount{ {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, } @@ -135,7 +132,9 @@ func newSystemMounts(rootfs, mountLabel string, mounts libcontainer.Mounts) []mo if len(mounts.OfType("devtmpfs")) == 1 { systemMounts = append(systemMounts, mount{source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: "mode=755"}) } - systemMounts = append(systemMounts, devMounts...) 
+ systemMounts = append(systemMounts, + mount{source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)}, + mount{source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}) if len(mounts.OfType("sysfs")) == 1 { systemMounts = append(systemMounts, mount{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags}) diff --git a/pkg/libcontainer/types.go b/pkg/libcontainer/types.go index d4818c3ffe..ade3c32f1d 100644 --- a/pkg/libcontainer/types.go +++ b/pkg/libcontainer/types.go @@ -11,6 +11,26 @@ var ( ErrUnsupported = errors.New("Unsupported method") ) +type Mounts []Mount + +func (s Mounts) OfType(t string) Mounts { + out := Mounts{} + for _, m := range s { + if m.Type == t { + out = append(out, m) + } + } + return out +} + +type Mount struct { + Type string `json:"type,omitempty"` + Source string `json:"source,omitempty"` // Source path, in the host namespace + Destination string `json:"destination,omitempty"` // Destination path, in the container + Writable bool `json:"writable,omitempty"` + Private bool `json:"private,omitempty"` +} + // namespaceList is used to convert the libcontainer types // into the names of the files located in /proc//ns/* for // each namespace From 7a0b3610664c2269fd5932f294adae72e6e54020 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 11 Apr 2014 15:28:56 +0000 Subject: [PATCH 038/219] Move capabilities into security pkg Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/init.go | 2 +- pkg/libcontainer/{ => security}/capabilities/capabilities.go | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename pkg/libcontainer/{ => security}/capabilities/capabilities.go (100%) diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 9aac9a40eb..22fdfbeeb7 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -11,11 +11,11 @@ import ( "github.com/dotcloud/docker/pkg/apparmor" "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/capabilities" "github.com/dotcloud/docker/pkg/libcontainer/console" "github.com/dotcloud/docker/pkg/libcontainer/mount" "github.com/dotcloud/docker/pkg/libcontainer/network" "github.com/dotcloud/docker/pkg/libcontainer/security/apparmor" + "github.com/dotcloud/docker/pkg/libcontainer/security/capabilities" "github.com/dotcloud/docker/pkg/libcontainer/utils" "github.com/dotcloud/docker/pkg/system" "github.com/dotcloud/docker/pkg/user" diff --git a/pkg/libcontainer/capabilities/capabilities.go b/pkg/libcontainer/security/capabilities/capabilities.go similarity index 100% rename from pkg/libcontainer/capabilities/capabilities.go rename to pkg/libcontainer/security/capabilities/capabilities.go From 2d6c3674349c09318e8d1fb3ce43dbabc15c97da Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 11 Apr 2014 15:31:45 +0000 Subject: [PATCH 039/219] Increment native driver version with these changes Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/execdriver/native/driver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go index 31a2eb0dae..8b374d9938 
100644 --- a/daemon/execdriver/native/driver.go +++ b/daemon/execdriver/native/driver.go @@ -23,7 +23,7 @@ import ( const ( DriverName = "native" - Version = "0.1" + Version = "0.2" BackupApparmorProfilePath = "apparmor/docker.back" // relative to docker root ) From 2d31aeb911fc94baa88f975110c5ccd45d041acb Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 11 Apr 2014 15:44:11 +0000 Subject: [PATCH 040/219] Update container.json and readme Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/README.md | 193 ++++++++++++++++++++++---------- pkg/libcontainer/container.json | 192 +++++++++++++++++++++++-------- 2 files changed, 277 insertions(+), 108 deletions(-) diff --git a/pkg/libcontainer/README.md b/pkg/libcontainer/README.md index d6d0fbae44..31031b26cd 100644 --- a/pkg/libcontainer/README.md +++ b/pkg/libcontainer/README.md @@ -16,76 +16,149 @@ process are specified in this file. The configuration is used for each process Sample `container.json` file: ```json { + "mounts" : [ + { + "type" : "devtmpfs" + } + ], + "tty" : true, + "environment" : [ + "HOME=/", + "PATH=PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin", + "container=docker", + "TERM=xterm-256color" + ], "hostname" : "koye", + "cgroups" : { + "parent" : "docker", + "name" : "docker-koye" + }, + "capabilities_mask" : [ + { + "value" : 8, + "key" : "SETPCAP", + "enabled" : false + }, + { + "enabled" : false, + "value" : 16, + "key" : "SYS_MODULE" + }, + { + "value" : 17, + "key" : "SYS_RAWIO", + "enabled" : false + }, + { + "key" : "SYS_PACCT", + "value" : 20, + "enabled" : false + }, + { + "value" : 21, + "key" : "SYS_ADMIN", + "enabled" : false + }, + { + "value" : 23, + "key" : "SYS_NICE", + "enabled" : false + }, + { + "value" : 24, + "key" : "SYS_RESOURCE", + "enabled" : false + }, + { + "key" : "SYS_TIME", + "value" : 25, + "enabled" : false + }, + { + "enabled" : false, + "value" : 26, + "key" : "SYS_TTY_CONFIG" + }, + { + "key" : "AUDIT_WRITE", + "value" : 29, + "enabled" : false + }, + { + "value" : 30, + "key" : "AUDIT_CONTROL", + "enabled" : false + }, + { + "enabled" : false, + "key" : "MAC_OVERRIDE", + "value" : 32 + }, + { + "enabled" : false, + "key" : "MAC_ADMIN", + "value" : 33 + }, + { + "key" : "NET_ADMIN", + "value" : 12, + "enabled" : false + }, + { + "value" : 27, + "key" : "MKNOD", + "enabled" : true + } + ], "networks" : [ { - "gateway" : "172.17.42.1", + "mtu" : 1500, + "address" : "127.0.0.1/0", + "type" : "loopback", + "gateway" : "localhost" + }, + { + "mtu" : 1500, + "address" : "172.17.42.2/16", + "type" : "veth", "context" : { "bridge" : "docker0", "prefix" : "veth" }, - "address" : "172.17.0.2/16", - "type" : "veth", - "mtu" : 1500 - } - ], - "cgroups" : { - "parent" : "docker", - "name" : "11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620" - }, - "tty" : true, - "environment" : [ - "HOME=/", - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "HOSTNAME=11bb30683fb0", - "TERM=xterm" - ], - "capabilities_mask" : [ - "SETPCAP", - "SYS_MODULE", - "SYS_RAWIO", - "SYS_PACCT", - "SYS_ADMIN", - "SYS_NICE", - "SYS_RESOURCE", - "SYS_TIME", - "SYS_TTY_CONFIG", - "MKNOD", - "AUDIT_WRITE", - "AUDIT_CONTROL", - "MAC_OVERRIDE", - "MAC_ADMIN", - "NET_ADMIN" - ], - "context" : { - "apparmor_profile" : "docker-default" - }, - "mounts" : [ - { - "source" : "/var/lib/docker/containers/11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620/resolv.conf", - "writable" : false, - "destination" : "/etc/resolv.conf", - "private" : true - }, 
- { - "source" : "/var/lib/docker/containers/11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620/hostname", - "writable" : false, - "destination" : "/etc/hostname", - "private" : true - }, - { - "source" : "/var/lib/docker/containers/11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620/hosts", - "writable" : false, - "destination" : "/etc/hosts", - "private" : true + "gateway" : "172.17.42.1" } ], "namespaces" : [ - "NEWNS", - "NEWUTS", - "NEWIPC", - "NEWPID", - "NEWNET" + { + "key" : "NEWNS", + "value" : 131072, + "enabled" : true, + "file" : "mnt" + }, + { + "key" : "NEWUTS", + "value" : 67108864, + "enabled" : true, + "file" : "uts" + }, + { + "enabled" : true, + "file" : "ipc", + "key" : "NEWIPC", + "value" : 134217728 + }, + { + "file" : "pid", + "enabled" : true, + "value" : 536870912, + "key" : "NEWPID" + }, + { + "enabled" : true, + "file" : "net", + "key" : "NEWNET", + "value" : 1073741824 + } ] } ``` diff --git a/pkg/libcontainer/container.json b/pkg/libcontainer/container.json index f045315a41..f15a49ab05 100644 --- a/pkg/libcontainer/container.json +++ b/pkg/libcontainer/container.json @@ -1,50 +1,146 @@ { - "hostname": "koye", - "tty": true, - "environment": [ - "HOME=/", - "PATH=PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin", - "container=docker", - "TERM=xterm-256color" - ], - "namespaces": [ - "NEWIPC", - "NEWNS", - "NEWPID", - "NEWUTS", - "NEWNET" - ], - "capabilities_mask": [ - "SETPCAP", - "SYS_MODULE", - "SYS_RAWIO", - "SYS_PACCT", - "SYS_ADMIN", - "SYS_NICE", - "SYS_RESOURCE", - "SYS_TIME", - "SYS_TTY_CONFIG", - "MKNOD", - "AUDIT_WRITE", - "AUDIT_CONTROL", - "MAC_OVERRIDE", - "MAC_ADMIN", - "NET_ADMIN" - ], - "networks": [{ - "type": "veth", - "context": { - "bridge": "docker0", - "prefix": "dock" - }, - "address": "172.17.0.100/16", - "gateway": "172.17.42.1", - "mtu": 1500 - } - ], - "cgroups": { - "name": "docker-koye", - "parent": "docker", - "memory": 5248000 - } + "mounts" : [ + { + "type" : "devtmpfs" + } + ], + "tty" : true, + "environment" : [ + "HOME=/", + "PATH=PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin", + "container=docker", + "TERM=xterm-256color" + ], + "hostname" : "koye", + "cgroups" : { + "parent" : "docker", + "name" : "docker-koye" + }, + "capabilities_mask" : [ + { + "value" : 8, + "key" : "SETPCAP", + "enabled" : false + }, + { + "enabled" : false, + "value" : 16, + "key" : "SYS_MODULE" + }, + { + "value" : 17, + "key" : "SYS_RAWIO", + "enabled" : false + }, + { + "key" : "SYS_PACCT", + "value" : 20, + "enabled" : false + }, + { + "value" : 21, + "key" : "SYS_ADMIN", + "enabled" : false + }, + { + "value" : 23, + "key" : "SYS_NICE", + "enabled" : false + }, + { + "value" : 24, + "key" : "SYS_RESOURCE", + "enabled" : false + }, + { + "key" : "SYS_TIME", + "value" : 25, + "enabled" : false + }, + { + "enabled" : false, + "value" : 26, + "key" : "SYS_TTY_CONFIG" + }, + { + "key" : "AUDIT_WRITE", + "value" : 29, + "enabled" : false + }, + { + "value" : 30, + "key" : "AUDIT_CONTROL", + "enabled" : false + }, + { + "enabled" : false, + "key" : "MAC_OVERRIDE", + "value" : 32 + }, + { + "enabled" : false, + "key" : "MAC_ADMIN", + "value" : 33 + }, + { + "key" : "NET_ADMIN", + "value" : 12, + "enabled" : false + }, + { + "value" : 27, + "key" : "MKNOD", + "enabled" : true + } + ], + "networks" : [ + { + "mtu" : 1500, + "address" : "127.0.0.1/0", + "type" : "loopback", + "gateway" : "localhost" + }, + { + "mtu" : 1500, + "address" : "172.17.42.2/16", + "type" : "veth", + "context" : { + "bridge" : "docker0", + "prefix" : "veth" + }, + 
"gateway" : "172.17.42.1" + } + ], + "namespaces" : [ + { + "key" : "NEWNS", + "value" : 131072, + "enabled" : true, + "file" : "mnt" + }, + { + "key" : "NEWUTS", + "value" : 67108864, + "enabled" : true, + "file" : "uts" + }, + { + "enabled" : true, + "file" : "ipc", + "key" : "NEWIPC", + "value" : 134217728 + }, + { + "file" : "pid", + "enabled" : true, + "value" : 536870912, + "key" : "NEWPID" + }, + { + "enabled" : true, + "file" : "net", + "key" : "NEWNET", + "value" : 1073741824 + } + ] } From 90678b31331de54598c7a6665c3e7a78bfe6ed63 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 18 Apr 2014 14:35:16 -0700 Subject: [PATCH 041/219] Update create with apparmor import Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- integration-cli/docker_cli_run_test.go | 277 +++++++++++++++++++++++++ 1 file changed, 277 insertions(+) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 40781294ae..5973f2fe1b 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -389,6 +389,283 @@ func TestMultipleVolumesFrom(t *testing.T) { logDone("run - multiple volumes from") } +// this tests verifies the ID format for the container +func TestVerifyContainerID(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, exit, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + if exit != 0 { + t.Fatalf("expected exit code 0 received %d", exit) + } + match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n")) + if err != nil { + t.Fatal(err) + } + if !match { + t.Fatalf("Invalid container ID: %s", out) + } + + deleteAllContainers() + + logDone("run - verify container ID") +} + +// Test that creating a container with a volume doesn't crash. Regression test for #995. 
+func TestCreateVolume(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-v", "/var/lib/data", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("run - create docker mangaed volume") +} + +func TestExitCode(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "/bin/sh", "-c", "exit 72") + + exit, err := runCommand(cmd) + if err == nil { + t.Fatal("should not have a non nil error") + } + if exit != 72 { + t.Fatalf("expected exit code 72 received %d", exit) + } + + deleteAllContainers() + + logDone("run - correct exit code") +} + +func TestUserDefaultsToRoot(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "uid=0(root) gid=0(root)") { + t.Fatalf("expected root user got %s", out) + } + deleteAllContainers() + + logDone("run - default user") +} + +func TestUserByName(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "root", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "uid=0(root) gid=0(root)") { + t.Fatalf("expected root user got %s", out) + } + deleteAllContainers() + + logDone("run - user by name") +} + +func TestUserByID(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "1", "busybox", "id") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { + t.Fatalf("expected daemon user got %s", out) + } + deleteAllContainers() + + logDone("run - user by id") +} + +func TestUserNotFound(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-u", "notme", "busybox", "id") + + _, err := runCommand(cmd) + if err == nil { + t.Fatal("unknown user should cause container to fail") + } + deleteAllContainers() + + logDone("run - user not found") +} + +func TestRunTwoConcurrentContainers(t *testing.T) { + group := sync.WaitGroup{} + group.Add(2) + + for i := 0; i < 2; i++ { + go func() { + defer group.Done() + cmd := exec.Command(dockerBinary, "run", "busybox", "sleep", "2") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + }() + } + + group.Wait() + + deleteAllContainers() + + logDone("run - two concurrent containers") +} + +func TestEnvironment(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "busybox", "env") + cmd.Env = append(os.Environ(), + "TRUE=false", + "TRICKY=tri\ncky\n", + ) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + actualEnv := strings.Split(out, "\n") + if actualEnv[len(actualEnv)-1] == "" { + actualEnv = actualEnv[:len(actualEnv)-1] + } + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOME=/", + "HOSTNAME=testing", + "FALSE=true", + "TRUE=false", + "TRICKY=tri", + "cky", + "", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + t.Fatalf("Wrong environment: should be %d variables, not: '%s'\n", len(goodEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } + + deleteAllContainers() + + logDone("run - verify environment") +} + +func TestContainerNetwork(t *testing.T) { + cmd := 
exec.Command(dockerBinary, "run", "busybox", "ping", "-c", "1", "127.0.0.1") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("run - test container network via ping") +} + +// Issue #4681 +func TestLoopbackWhenNetworkDisabled(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--networking=false", "busybox", "ping", "-c", "1", "127.0.0.1") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("run - test container loopback when networking disabled") +} + +func TestLoopbackOnlyExistsWhenNetworkingDisabled(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--networking=false", "busybox", "ip", "a", "show", "up") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err, out) + } + + interfaces := regexp.MustCompile(`(?m)^[0-9]+: [a-zA-Z0-9]+`).FindAllString(out, -1) + if len(interfaces) != 1 { + t.Fatalf("Wrong interface count in test container: expected [*: lo], got %s", interfaces) + } + if !strings.HasSuffix(interfaces[0], ": lo") { + t.Fatalf("Wrong interface in test container: expected [*: lo], got %s", interfaces) + } + + deleteAllContainers() + + logDone("run - test loopback only exists when networking disabled") +} + +func TestPrivilegedCanMknod(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test privileged can mknod") +} + +func TestUnPrivilegedCanMknod(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test un-privileged can mknod") +} + +func TestPrivilegedCanMount(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + t.Fatal(err) + } + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + t.Fatalf("expected output ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test privileged can mount") +} + +func TestUnPrivilegedCannotMount(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + out, _, err := runCommandWithOutput(cmd) + if err == nil { + t.Fatal(err, out) + } + + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + t.Fatalf("expected output not ok received %s", actual) + } + deleteAllContainers() + + logDone("run - test un-privileged cannot mount") +} + func TestSysNotAvaliableInNonPrivilegedContainers(t *testing.T) { cmd := exec.Command(dockerBinary, "run", "busybox", "ls", "/sys/kernel") if code, err := runCommand(cmd); err == nil || code == 0 { From fa5cabf9fe9e257d64638043ca2fd08a7bf96cb3 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 21 Apr 2014 12:07:07 -0700 Subject: [PATCH 042/219] Update init for new apparmor import path Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/init.go | 1 - 1 file changed, 1 
deletion(-) diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 22fdfbeeb7..67095fdba1 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -14,7 +14,6 @@ import ( "github.com/dotcloud/docker/pkg/libcontainer/console" "github.com/dotcloud/docker/pkg/libcontainer/mount" "github.com/dotcloud/docker/pkg/libcontainer/network" - "github.com/dotcloud/docker/pkg/libcontainer/security/apparmor" "github.com/dotcloud/docker/pkg/libcontainer/security/capabilities" "github.com/dotcloud/docker/pkg/libcontainer/utils" "github.com/dotcloud/docker/pkg/system" From d5c9f61ecc1c8167322a8cc3b41f29a35c80b9b8 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 23 Apr 2014 18:12:07 -0700 Subject: [PATCH 043/219] Ignore isnot exists errors for proc paths Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/security/restrict/restrict.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pkg/libcontainer/security/restrict/restrict.go b/pkg/libcontainer/security/restrict/restrict.go index d5c1dbbe26..291d6ca5dc 100644 --- a/pkg/libcontainer/security/restrict/restrict.go +++ b/pkg/libcontainer/security/restrict/restrict.go @@ -2,9 +2,11 @@ package restrict import ( "fmt" - "github.com/dotcloud/docker/pkg/system" + "os" "path/filepath" "syscall" + + "github.com/dotcloud/docker/pkg/system" ) const flags = syscall.MS_BIND | syscall.MS_REC | syscall.MS_RDONLY @@ -36,6 +38,9 @@ func Restrict(rootfs, empty string) error { source = filepath.Join(rootfs, source) } if err := system.Mount(source, dest, "bind", flags, ""); err != nil { + if os.IsNotExist(err) { + continue + } return fmt.Errorf("unable to mount %s over %s %s", source, dest, err) } if err := system.Mount("", dest, "bind", flags|syscall.MS_REMOUNT, ""); err != nil { From f188b9f623e23ee624aca8654bf00f49ee3bae29 Mon Sep 17 00:00:00 2001 From: Victor Marmol Date: Thu, 24 Apr 2014 05:11:43 +0000 Subject: [PATCH 044/219] Separating cgroup Memory and MemoryReservation. This will allow for these to be set independently. Keep the current Docker behavior where Memory and MemoryReservation are set to the value of Memory. 
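In cgroup terms the split described here comes down to two files, memory.limit_in_bytes for the hard limit and memory.soft_limit_in_bytes for the reservation, which is what the fs/memory.go hunk below writes. A minimal sketch of those writes against a hypothetical cgroup directory (the path and file mode are assumptions, not taken from this patch):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strconv"
)

func writeFile(dir, file, data string) error {
	return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
}

func main() {
	// Hypothetical per-container memory cgroup directory.
	dir := "/sys/fs/cgroup/memory/docker/example"
	var (
		memory      = int64(512 * 1024 * 1024) // hard limit
		reservation = int64(256 * 1024 * 1024) // soft limit / reservation
	)
	if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(memory, 10)); err != nil {
		fmt.Println(err)
	}
	if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(reservation, 10)); err != nil {
		fmt.Println(err)
	}
}
```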
Docker-DCO-1.1-Signed-off-by: Victor Marmol (github: vmarmol) --- .../execdriver/native/configuration/parse.go | 22 +++++++++++++++---- .../native/configuration/parse_test.go | 18 ++++++++++++++- daemon/execdriver/native/create.go | 1 + pkg/cgroups/cgroups.go | 15 +++++++------ pkg/cgroups/fs/memory.go | 9 +++++--- pkg/cgroups/systemd/apply_systemd.go | 4 ++++ 6 files changed, 54 insertions(+), 15 deletions(-) diff --git a/daemon/execdriver/native/configuration/parse.go b/daemon/execdriver/native/configuration/parse.go index 6d6c643919..c3846af910 100644 --- a/daemon/execdriver/native/configuration/parse.go +++ b/daemon/execdriver/native/configuration/parse.go @@ -21,10 +21,11 @@ var actions = map[string]Action{ "net.join": joinNetNamespace, // join another containers net namespace - "cgroups.cpu_shares": cpuShares, // set the cpu shares - "cgroups.memory": memory, // set the memory limit - "cgroups.memory_swap": memorySwap, // set the memory swap limit - "cgroups.cpuset.cpus": cpusetCpus, // set the cpus used + "cgroups.cpu_shares": cpuShares, // set the cpu shares + "cgroups.memory": memory, // set the memory limit + "cgroups.memory_reservation": memoryReservation, // set the memory reservation + "cgroups.memory_swap": memorySwap, // set the memory swap limit + "cgroups.cpuset.cpus": cpusetCpus, // set the cpus used "apparmor_profile": apparmorProfile, // set the apparmor profile to apply @@ -70,6 +71,19 @@ func memory(container *libcontainer.Container, context interface{}, value string return nil } +func memoryReservation(container *libcontainer.Container, context interface{}, value string) error { + if container.Cgroups == nil { + return fmt.Errorf("cannot set cgroups when they are disabled") + } + + v, err := utils.RAMInBytes(value) + if err != nil { + return err + } + container.Cgroups.MemoryReservation = v + return nil +} + func memorySwap(container *libcontainer.Container, context interface{}, value string) error { if container.Cgroups == nil { return fmt.Errorf("cannot set cgroups when they are disabled") diff --git a/daemon/execdriver/native/configuration/parse_test.go b/daemon/execdriver/native/configuration/parse_test.go index 9034867b7b..c28176f2ef 100644 --- a/daemon/execdriver/native/configuration/parse_test.go +++ b/daemon/execdriver/native/configuration/parse_test.go @@ -93,7 +93,7 @@ func TestCpuShares(t *testing.T) { } } -func TestCgroupMemory(t *testing.T) { +func TestMemory(t *testing.T) { var ( container = template.New() opts = []string{ @@ -109,6 +109,22 @@ func TestCgroupMemory(t *testing.T) { } } +func TestMemoryReservation(t *testing.T) { + var ( + container = template.New() + opts = []string{ + "cgroups.memory_reservation=500m", + } + ) + if err := ParseConfiguration(container, nil, opts); err != nil { + t.Fatal(err) + } + + if expected := int64(500 * 1024 * 1024); container.Cgroups.MemoryReservation != expected { + t.Fatalf("expected memory reservation %d got %d", expected, container.Cgroups.MemoryReservation) + } +} + func TestAddCap(t *testing.T) { var ( container = template.New() diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index ef17ce7042..334a97ad4b 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -91,6 +91,7 @@ func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.C if c.Resources != nil { container.Cgroups.CpuShares = c.Resources.CpuShares container.Cgroups.Memory = c.Resources.Memory + container.Cgroups.MemoryReservation = c.Resources.Memory 
container.Cgroups.MemorySwap = c.Resources.MemorySwap } return nil diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go index 9a498609b5..81e3eb551a 100644 --- a/pkg/cgroups/cgroups.go +++ b/pkg/cgroups/cgroups.go @@ -12,13 +12,14 @@ type Cgroup struct { Name string `json:"name,omitempty"` Parent string `json:"parent,omitempty"` - DeviceAccess bool `json:"device_access,omitempty"` // name of parent cgroup or slice - Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes) - MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap - CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers) - CpuQuota int64 `json:"cpu_quota,omitempty"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. - CpuPeriod int64 `json:"cpu_period,omitempty"` // CPU period to be used for hardcapping (in usecs). 0 to use system default. - CpusetCpus string `json:"cpuset_cpus,omitempty"` // CPU to use + DeviceAccess bool `json:"device_access,omitempty"` // name of parent cgroup or slice + Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes) + MemoryReservation int64 `json:"memory_reservation,omitempty"` // Memory reservation or soft_limit (in bytes) + MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap + CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers) + CpuQuota int64 `json:"cpu_quota,omitempty"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. + CpuPeriod int64 `json:"cpu_period,omitempty"` // CPU period to be used for hardcapping (in usecs). 0 to use system default. + CpusetCpus string `json:"cpuset_cpus,omitempty"` // CPU to use UnitProperties [][2]string `json:"unit_properties,omitempty"` // systemd unit properties } diff --git a/pkg/cgroups/fs/memory.go b/pkg/cgroups/fs/memory.go index cf4bf5ab73..5315291197 100644 --- a/pkg/cgroups/fs/memory.go +++ b/pkg/cgroups/fs/memory.go @@ -13,7 +13,7 @@ type memoryGroup struct { func (s *memoryGroup) Set(d *data) error { dir, err := d.join("memory") // only return an error for memory if it was not specified - if err != nil && (d.c.Memory != 0 || d.c.MemorySwap != 0) { + if err != nil && (d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0) { return err } defer func() { @@ -22,12 +22,15 @@ func (s *memoryGroup) Set(d *data) error { } }() - if d.c.Memory != 0 || d.c.MemorySwap != 0 { + // Only set values if some config was specified. 
+ if d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0 { if d.c.Memory != 0 { if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(d.c.Memory, 10)); err != nil { return err } - if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(d.c.Memory, 10)); err != nil { + } + if d.c.MemoryReservation != 0 { + if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(d.c.MemoryReservation, 10)); err != nil { return err } } diff --git a/pkg/cgroups/systemd/apply_systemd.go b/pkg/cgroups/systemd/apply_systemd.go index 7c26080d6e..e1246f6e70 100644 --- a/pkg/cgroups/systemd/apply_systemd.go +++ b/pkg/cgroups/systemd/apply_systemd.go @@ -121,6 +121,10 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { properties = append(properties, systemd1.Property{"MemoryLimit", dbus.MakeVariant(uint64(c.Memory))}) } + if c.MemoryReservation != 0 { + properties = append(properties, + systemd1.Property{"MemorySoftLimit", dbus.MakeVariant(uint64(c.MemoryReservation))}) + } // TODO: MemorySwap not available in systemd if c.CpuShares != 0 { From 14b2a9de874ab80aaaa942b7b8a226bb56dfcd7f Mon Sep 17 00:00:00 2001 From: Rohit Jnagal Date: Fri, 25 Apr 2014 00:17:45 +0000 Subject: [PATCH 045/219] Fix container.json sample to be loadable by nsinit. Docker-DCO-1.1-Signed-off-by: Rohit Jnagal (github: rjnagal) --- pkg/libcontainer/README.md | 40 ++++++++++---------- pkg/libcontainer/container.json | 43 ++++++++++----------- pkg/libcontainer/container_test.go | 60 ++++++++++++++++++++++++++++++ 3 files changed, 102 insertions(+), 41 deletions(-) create mode 100644 pkg/libcontainer/container_test.go diff --git a/pkg/libcontainer/README.md b/pkg/libcontainer/README.md index d6d0fbae44..224465ce1c 100644 --- a/pkg/libcontainer/README.md +++ b/pkg/libcontainer/README.md @@ -41,21 +41,21 @@ Sample `container.json` file: "TERM=xterm" ], "capabilities_mask" : [ - "SETPCAP", - "SYS_MODULE", - "SYS_RAWIO", - "SYS_PACCT", - "SYS_ADMIN", - "SYS_NICE", - "SYS_RESOURCE", - "SYS_TIME", - "SYS_TTY_CONFIG", - "MKNOD", - "AUDIT_WRITE", - "AUDIT_CONTROL", - "MAC_OVERRIDE", - "MAC_ADMIN", - "NET_ADMIN" + { "key": "SETPCAP" }, + { "key": "SYS_MODULE" }, + { "key": "SYS_RAWIO" }, + { "key": "SYS_PACCT" }, + { "key": "SYS_ADMIN" }, + { "key": "SYS_NICE" }, + { "key": "SYS_RESOURCE" }, + { "key": "SYS_TIME" }, + { "key": "SYS_TTY_CONFIG" }, + { "key": "MKNOD" }, + { "key": "AUDIT_WRITE" }, + { "key": "AUDIT_CONTROL" }, + { "key": "MAC_OVERRIDE" }, + { "key": "MAC_ADMIN" }, + { "key": "NET_ADMIN" } ], "context" : { "apparmor_profile" : "docker-default" @@ -81,11 +81,11 @@ Sample `container.json` file: } ], "namespaces" : [ - "NEWNS", - "NEWUTS", - "NEWIPC", - "NEWPID", - "NEWNET" + { "key": "NEWNS" }, + { "key": "NEWUTS" }, + { "key": "NEWIPC" }, + { "key": "NEWPID" }, + { "key": "NEWNET" } ] } ``` diff --git a/pkg/libcontainer/container.json b/pkg/libcontainer/container.json index f045315a41..b0465d4890 100644 --- a/pkg/libcontainer/container.json +++ b/pkg/libcontainer/container.json @@ -8,28 +8,28 @@ "TERM=xterm-256color" ], "namespaces": [ - "NEWIPC", - "NEWNS", - "NEWPID", - "NEWUTS", - "NEWNET" + { "key": "NEWIPC" }, + { "key": "NEWNS" }, + { "key": "NEWPID" }, + { "key": "NEWUTS" }, + { "key": "NEWNET" } ], "capabilities_mask": [ - "SETPCAP", - "SYS_MODULE", - "SYS_RAWIO", - "SYS_PACCT", - "SYS_ADMIN", - "SYS_NICE", - "SYS_RESOURCE", - "SYS_TIME", - "SYS_TTY_CONFIG", - "MKNOD", - "AUDIT_WRITE", - "AUDIT_CONTROL", - "MAC_OVERRIDE", - "MAC_ADMIN", - 
"NET_ADMIN" + { "key": "SETPCAP" }, + { "key": "SYS_MODULE" }, + { "key": "SYS_RAWIO" }, + { "key": "SYS_PACCT" }, + { "key": "SYS_ADMIN" }, + { "key": "SYS_NICE" }, + { "key": "SYS_RESOURCE" }, + { "key": "SYS_TIME" }, + { "key": "SYS_TTY_CONFIG" }, + { "key": "MKNOD" }, + { "key": "AUDIT_WRITE" }, + { "key": "AUDIT_CONTROL" }, + { "key": "MAC_OVERRIDE" }, + { "key": "MAC_ADMIN" }, + { "key": "NET_ADMIN" } ], "networks": [{ "type": "veth", @@ -45,6 +45,7 @@ "cgroups": { "name": "docker-koye", "parent": "docker", - "memory": 5248000 + "memory": 5248000, + "cpu_shares": 1024 } } diff --git a/pkg/libcontainer/container_test.go b/pkg/libcontainer/container_test.go new file mode 100644 index 0000000000..06e7979b0a --- /dev/null +++ b/pkg/libcontainer/container_test.go @@ -0,0 +1,60 @@ +package libcontainer + +import ( + "encoding/json" + "os" + "testing" +) + +func TestContainerJsonFormat(t *testing.T) { + f, err := os.Open("container.json") + if err != nil { + t.Fatal("Unable to open container.json") + } + defer f.Close() + + var container *Container + if err := json.NewDecoder(f).Decode(&container); err != nil { + t.Log("failed to decode container config") + t.FailNow() + } + if container.Hostname != "koye" { + t.Log("hostname is not set") + t.Fail() + } + + if !container.Tty { + t.Log("tty should be set to true") + t.Fail() + } + + if !container.Namespaces.Contains("NEWNET") { + t.Log("namespaces should contain NEWNET") + t.Fail() + } + + if container.Namespaces.Contains("NEWUSER") { + t.Log("namespaces should not contain NEWUSER") + t.Fail() + } + + if !container.CapabilitiesMask.Contains("SYS_ADMIN") { + t.Log("capabilities should contain SYS_ADMIN") + t.Fail() + } + + if container.CapabilitiesMask.Contains("SYS_CHROOT") { + t.Log("capabitlies should not contain SYS_CHROOT") + t.Fail() + } + + if container.Cgroups.CpuShares != 1024 { + t.Log("cpu shares not set correctly") + t.Fail() + } + + if container.Cgroups.Memory != 5248000 { + t.Log("memory limit not set correctly") + t.Fail() + } +} From 0aacca3ae6fa7d46a3e2c4e60e71f67c9a4c64e5 Mon Sep 17 00:00:00 2001 From: Rohit Jnagal Date: Fri, 25 Apr 2014 00:20:14 +0000 Subject: [PATCH 046/219] Fix typos in nsinit logs. 
Docker-DCO-1.1-Signed-off-by: Rohit Jnagal (github: rjnagal) --- pkg/libcontainer/nsinit/exec.go | 2 +- pkg/libcontainer/nsinit/nsinit/main.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index e76e060d1c..430dd89ff3 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -57,7 +57,7 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ if err != nil { return -1, err } - ns.logger.Printf("writting pid %d to file\n", command.Process.Pid) + ns.logger.Printf("writing pid %d to file\n", command.Process.Pid) if err := ns.stateWriter.WritePid(command.Process.Pid, started); err != nil { command.Process.Kill() return -1, err diff --git a/pkg/libcontainer/nsinit/nsinit/main.go b/pkg/libcontainer/nsinit/nsinit/main.go index 37aa784981..0965c1c8ca 100644 --- a/pkg/libcontainer/nsinit/nsinit/main.go +++ b/pkg/libcontainer/nsinit/nsinit/main.go @@ -32,7 +32,7 @@ func main() { registerFlags() if flag.NArg() < 1 { - log.Fatalf("wrong number of argments %d", flag.NArg()) + log.Fatalf("wrong number of arguments %d", flag.NArg()) } container, err := loadContainer() if err != nil { @@ -73,7 +73,7 @@ func main() { l.Fatal(err) } if flag.NArg() < 2 { - l.Fatalf("wrong number of argments %d", flag.NArg()) + l.Fatalf("wrong number of arguments %d", flag.NArg()) } syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(pipeFd)) if err != nil { From 569b23413502713342b605abaf917f664d206a4b Mon Sep 17 00:00:00 2001 From: Rohit Jnagal Date: Fri, 25 Apr 2014 01:10:11 +0000 Subject: [PATCH 047/219] Add enabled option to namespaces and capabilities spec in container.json. Although we don't yet check for enabled everywhere. Docker-DCO-1.1-Signed-off-by: Rohit Jnagal (github: rjnagal) --- pkg/libcontainer/README.md | 81 +++++++++++++++++++------- pkg/libcontainer/container.json | 100 +++++++++++++++++++++++++------- 2 files changed, 141 insertions(+), 40 deletions(-) diff --git a/pkg/libcontainer/README.md b/pkg/libcontainer/README.md index 224465ce1c..1ab2a48ea5 100644 --- a/pkg/libcontainer/README.md +++ b/pkg/libcontainer/README.md @@ -41,21 +41,52 @@ Sample `container.json` file: "TERM=xterm" ], "capabilities_mask" : [ - { "key": "SETPCAP" }, - { "key": "SYS_MODULE" }, - { "key": "SYS_RAWIO" }, - { "key": "SYS_PACCT" }, - { "key": "SYS_ADMIN" }, - { "key": "SYS_NICE" }, - { "key": "SYS_RESOURCE" }, - { "key": "SYS_TIME" }, - { "key": "SYS_TTY_CONFIG" }, - { "key": "MKNOD" }, - { "key": "AUDIT_WRITE" }, - { "key": "AUDIT_CONTROL" }, - { "key": "MAC_OVERRIDE" }, - { "key": "MAC_ADMIN" }, - { "key": "NET_ADMIN" } + { + "key": "SETPCAP", + "enabled": true + }, + { "key": "SYS_MODULE", + "enabled": true + }, + { "key": "SYS_RAWIO", + "enabled": true + }, + { "key": "SYS_PACCT", + "enabled": true + }, + { "key": "SYS_ADMIN", + "enabled": true + }, + { "key": "SYS_NICE", + "enabled": true + }, + { "key": "SYS_RESOURCE", + "enabled": true + }, + { "key": "SYS_TIME", + "enabled": true + }, + { "key": "SYS_TTY_CONFIG", + "enabled": true + }, + { "key": "MKNOD", + "enabled": true + }, + { "key": "AUDIT_WRITE", + "enabled": true + }, + { "key": "AUDIT_CONTROL", + "enabled": true + }, + { "key": "MAC_OVERRIDE", + "enabled": true + }, + { "key": "MAC_ADMIN", + "enabled": true + }, + { "key": "NET_ADMIN", + "enabled": true + } ], "context" : { "apparmor_profile" : "docker-default" @@ -81,11 +112,21 @@ Sample `container.json` file: } ], "namespaces" : [ - { "key": "NEWNS" }, - { "key": 
"NEWUTS" }, - { "key": "NEWIPC" }, - { "key": "NEWPID" }, - { "key": "NEWNET" } + { "key": "NEWNS", + "enabled": true + }, + { "key": "NEWUTS", + "enabled": true + }, + { "key": "NEWIPC", + "enabled": true + }, + { "key": "NEWPID", + "enabled": true + }, + { "key": "NEWNET", + "enabled": true + } ] } ``` diff --git a/pkg/libcontainer/container.json b/pkg/libcontainer/container.json index b0465d4890..03a5091efa 100644 --- a/pkg/libcontainer/container.json +++ b/pkg/libcontainer/container.json @@ -8,28 +8,88 @@ "TERM=xterm-256color" ], "namespaces": [ - { "key": "NEWIPC" }, - { "key": "NEWNS" }, - { "key": "NEWPID" }, - { "key": "NEWUTS" }, - { "key": "NEWNET" } + { + "key": "NEWIPC", + "enabled": true + }, + { + "key": "NEWNS", + "enabled": true + }, + { + "key": "NEWPID", + "enabled": true + }, + { + "key": "NEWUTS", + "enabled": true + }, + { + "key": "NEWNET", + "enabled": true + } ], "capabilities_mask": [ - { "key": "SETPCAP" }, - { "key": "SYS_MODULE" }, - { "key": "SYS_RAWIO" }, - { "key": "SYS_PACCT" }, - { "key": "SYS_ADMIN" }, - { "key": "SYS_NICE" }, - { "key": "SYS_RESOURCE" }, - { "key": "SYS_TIME" }, - { "key": "SYS_TTY_CONFIG" }, - { "key": "MKNOD" }, - { "key": "AUDIT_WRITE" }, - { "key": "AUDIT_CONTROL" }, - { "key": "MAC_OVERRIDE" }, - { "key": "MAC_ADMIN" }, - { "key": "NET_ADMIN" } + { + "key": "SETPCAP", + "enabled": true + }, + { + "key": "SYS_MODULE", + "enabled": true + }, + { + "key": "SYS_RAWIO", + "enabled": false + }, + { + "key": "SYS_PACCT", + "enabled": true + }, + { + "key": "SYS_ADMIN", + "enabled": true + }, + { + "key": "SYS_NICE", + "enabled": true + }, + { + "key": "SYS_RESOURCE", + "enabled": true + }, + { + "key": "SYS_TIME", + "enabled": true + }, + { + "key": "SYS_TTY_CONFIG", + "enabled": true + }, + { + "key": "MKNOD", + "enabled": true + }, + { + "key": "AUDIT_WRITE", + "enabled": true + }, + { + "key": "AUDIT_CONTROL", + "enabled": true + }, + { + "key": "MAC_OVERRIDE", + "enabled": true + }, + { + "key": "MAC_ADMIN", + "enabled": true + }, + { + "key": "NET_ADMIN", + "enabled": true + } ], "networks": [{ "type": "veth", From 580c2620e7b92d9aee7c1cd033ca987dda161cf1 Mon Sep 17 00:00:00 2001 From: Rohit Jnagal Date: Fri, 25 Apr 2014 01:23:48 +0000 Subject: [PATCH 048/219] Improved README formatting. 
Docker-DCO-1.1-Signed-off-by: Rohit Jnagal (github: rjnagal) --- pkg/libcontainer/README.md | 99 +++++++++++++++++++++++--------------- 1 file changed, 59 insertions(+), 40 deletions(-) diff --git a/pkg/libcontainer/README.md b/pkg/libcontainer/README.md index 1ab2a48ea5..b58b789d73 100644 --- a/pkg/libcontainer/README.md +++ b/pkg/libcontainer/README.md @@ -42,50 +42,64 @@ Sample `container.json` file: ], "capabilities_mask" : [ { - "key": "SETPCAP", - "enabled": true + "key": "SETPCAP", + "enabled": true }, - { "key": "SYS_MODULE", - "enabled": true + { + "key": "SYS_MODULE", + "enabled": true }, - { "key": "SYS_RAWIO", - "enabled": true + { + "key": "SYS_RAWIO", + "enabled": false }, - { "key": "SYS_PACCT", - "enabled": true + { + "key": "SYS_PACCT", + "enabled": true }, - { "key": "SYS_ADMIN", - "enabled": true + { + "key": "SYS_ADMIN", + "enabled": true }, - { "key": "SYS_NICE", - "enabled": true + { + "key": "SYS_NICE", + "enabled": true }, - { "key": "SYS_RESOURCE", - "enabled": true + { + "key": "SYS_RESOURCE", + "enabled": true }, - { "key": "SYS_TIME", - "enabled": true + { + "key": "SYS_TIME", + "enabled": true }, - { "key": "SYS_TTY_CONFIG", - "enabled": true + { + "key": "SYS_TTY_CONFIG", + "enabled": true }, - { "key": "MKNOD", - "enabled": true + { + "key": "MKNOD", + "enabled": true }, - { "key": "AUDIT_WRITE", - "enabled": true + { + "key": "AUDIT_WRITE", + "enabled": true }, - { "key": "AUDIT_CONTROL", - "enabled": true + { + "key": "AUDIT_CONTROL", + "enabled": true }, - { "key": "MAC_OVERRIDE", - "enabled": true + { + "key": "MAC_OVERRIDE", + "enabled": true }, - { "key": "MAC_ADMIN", - "enabled": true + { + "key": "MAC_ADMIN", + "enabled": true }, - { "key": "NET_ADMIN", - "enabled": true + { + "key": "NET_ADMIN", + "enabled": true } ], "context" : { @@ -112,20 +126,25 @@ Sample `container.json` file: } ], "namespaces" : [ - { "key": "NEWNS", - "enabled": true + { + "key": "NEWNS", + "enabled": true }, - { "key": "NEWUTS", - "enabled": true + { + "key": "NEWUTS", + "enabled": true }, - { "key": "NEWIPC", - "enabled": true + { + "key": "NEWIPC", + "enabled": true }, - { "key": "NEWPID", - "enabled": true + { + "key": "NEWPID", + "enabled": true }, - { "key": "NEWNET", - "enabled": true + { + "key": "NEWNET", + "enabled": true } ] } From ad924959a9879af7477fa23c19d570882c1f378f Mon Sep 17 00:00:00 2001 From: Victor Marmol Date: Thu, 24 Apr 2014 22:59:37 +0000 Subject: [PATCH 049/219] Add memory usage and max usage stats. Docker-DCO-1.1-Signed-off-by: Victor Marmol (github: vmarmol) --- pkg/cgroups/fs/memory.go | 22 ++++++++++-- pkg/cgroups/fs/utils.go | 11 ++++++ pkg/cgroups/fs/utils_test.go | 68 ++++++++++++++++++++++++++++++++++++ 3 files changed, 98 insertions(+), 3 deletions(-) create mode 100644 pkg/cgroups/fs/utils_test.go diff --git a/pkg/cgroups/fs/memory.go b/pkg/cgroups/fs/memory.go index 5315291197..837640c088 100644 --- a/pkg/cgroups/fs/memory.go +++ b/pkg/cgroups/fs/memory.go @@ -2,6 +2,7 @@ package fs import ( "bufio" + "fmt" "os" "path/filepath" "strconv" @@ -56,13 +57,14 @@ func (s *memoryGroup) Stats(d *data) (map[string]float64, error) { return nil, err } - f, err := os.Open(filepath.Join(path, "memory.stat")) + // Set stats from memory.stat. 
+ statsFile, err := os.Open(filepath.Join(path, "memory.stat")) if err != nil { return nil, err } - defer f.Close() + defer statsFile.Close() - sc := bufio.NewScanner(f) + sc := bufio.NewScanner(statsFile) for sc.Scan() { t, v, err := getCgroupParamKeyValue(sc.Text()) if err != nil { @@ -70,5 +72,19 @@ func (s *memoryGroup) Stats(d *data) (map[string]float64, error) { } paramData[t] = v } + + // Set memory usage and max historical usage. + params := []string{ + "usage_in_bytes", + "max_usage_in_bytes", + } + for _, param := range params { + value, err := getCgroupParamFloat64(path, fmt.Sprintf("memory.%s", param)) + if err != nil { + return nil, err + } + paramData[param] = value + } + return paramData, nil } diff --git a/pkg/cgroups/fs/utils.go b/pkg/cgroups/fs/utils.go index f4c4846b8c..8be65c97ea 100644 --- a/pkg/cgroups/fs/utils.go +++ b/pkg/cgroups/fs/utils.go @@ -3,6 +3,8 @@ package fs import ( "errors" "fmt" + "io/ioutil" + "path/filepath" "strconv" "strings" ) @@ -27,3 +29,12 @@ func getCgroupParamKeyValue(t string) (string, float64, error) { return "", 0.0, ErrNotValidFormat } } + +// Gets a single float64 value from the specified cgroup file. +func getCgroupParamFloat64(cgroupPath, cgroupFile string) (float64, error) { + contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile)) + if err != nil { + return -1.0, err + } + return strconv.ParseFloat(strings.TrimSpace(string(contents)), 64) +} diff --git a/pkg/cgroups/fs/utils_test.go b/pkg/cgroups/fs/utils_test.go new file mode 100644 index 0000000000..c8f1b0172b --- /dev/null +++ b/pkg/cgroups/fs/utils_test.go @@ -0,0 +1,68 @@ +package fs + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +const ( + cgroupFile = "cgroup.file" + floatValue = 2048.0 + floatString = "2048" +) + +func TestGetCgroupParamsFloat64(t *testing.T) { + // Setup tempdir. + tempDir, err := ioutil.TempDir("", "cgroup_utils_test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + tempFile := filepath.Join(tempDir, cgroupFile) + + // Success. + err = ioutil.WriteFile(tempFile, []byte(floatString), 0755) + if err != nil { + t.Fatal(err) + } + value, err := getCgroupParamFloat64(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != floatValue { + t.Fatalf("Expected %f to equal %f", value, floatValue) + } + + // Success with new line. + err = ioutil.WriteFile(tempFile, []byte(floatString+"\n"), 0755) + if err != nil { + t.Fatal(err) + } + value, err = getCgroupParamFloat64(tempDir, cgroupFile) + if err != nil { + t.Fatal(err) + } else if value != floatValue { + t.Fatalf("Expected %f to equal %f", value, floatValue) + } + + // Not a float. + err = ioutil.WriteFile(tempFile, []byte("not-a-float"), 0755) + if err != nil { + t.Fatal(err) + } + _, err = getCgroupParamFloat64(tempDir, cgroupFile) + if err == nil { + t.Fatal("Expecting error, got none") + } + + // Unknown file. + err = os.Remove(tempFile) + if err != nil { + t.Fatal(err) + } + _, err = getCgroupParamFloat64(tempDir, cgroupFile) + if err == nil { + t.Fatal("Expecting error, got none") + } +} From 24f978094dc5c9eae0ca60001b65256b2b30f2c8 Mon Sep 17 00:00:00 2001 From: Rohit Jnagal Date: Fri, 25 Apr 2014 06:02:30 +0000 Subject: [PATCH 050/219] Updated sample config and README to match the default template for native execdriver. 
Docker-DCO-1.1-Signed-off-by: Rohit Jnagal (github: rjnagal) --- pkg/libcontainer/README.md | 26 +++++++++++++------------- pkg/libcontainer/container.json | 26 +++++++++++++------------- pkg/libcontainer/container_test.go | 17 +++++++++++++---- 3 files changed, 39 insertions(+), 30 deletions(-) diff --git a/pkg/libcontainer/README.md b/pkg/libcontainer/README.md index b58b789d73..70f22f5639 100644 --- a/pkg/libcontainer/README.md +++ b/pkg/libcontainer/README.md @@ -43,11 +43,11 @@ Sample `container.json` file: "capabilities_mask" : [ { "key": "SETPCAP", - "enabled": true + "enabled": false }, { "key": "SYS_MODULE", - "enabled": true + "enabled": false }, { "key": "SYS_RAWIO", @@ -55,27 +55,27 @@ Sample `container.json` file: }, { "key": "SYS_PACCT", - "enabled": true + "enabled": false }, { "key": "SYS_ADMIN", - "enabled": true + "enabled": false }, { "key": "SYS_NICE", - "enabled": true + "enabled": false }, { "key": "SYS_RESOURCE", - "enabled": true + "enabled": false }, { "key": "SYS_TIME", - "enabled": true + "enabled": false }, { "key": "SYS_TTY_CONFIG", - "enabled": true + "enabled": false }, { "key": "MKNOD", @@ -83,23 +83,23 @@ Sample `container.json` file: }, { "key": "AUDIT_WRITE", - "enabled": true + "enabled": false }, { "key": "AUDIT_CONTROL", - "enabled": true + "enabled": false }, { "key": "MAC_OVERRIDE", - "enabled": true + "enabled": false }, { "key": "MAC_ADMIN", - "enabled": true + "enabled": false }, { "key": "NET_ADMIN", - "enabled": true + "enabled": false } ], "context" : { diff --git a/pkg/libcontainer/container.json b/pkg/libcontainer/container.json index 03a5091efa..68f9504f99 100644 --- a/pkg/libcontainer/container.json +++ b/pkg/libcontainer/container.json @@ -32,11 +32,11 @@ "capabilities_mask": [ { "key": "SETPCAP", - "enabled": true + "enabled": false }, { "key": "SYS_MODULE", - "enabled": true + "enabled": false }, { "key": "SYS_RAWIO", @@ -44,27 +44,27 @@ }, { "key": "SYS_PACCT", - "enabled": true + "enabled": false }, { "key": "SYS_ADMIN", - "enabled": true + "enabled": false }, { "key": "SYS_NICE", - "enabled": true + "enabled": false }, { "key": "SYS_RESOURCE", - "enabled": true + "enabled": false }, { "key": "SYS_TIME", - "enabled": true + "enabled": false }, { "key": "SYS_TTY_CONFIG", - "enabled": true + "enabled": false }, { "key": "MKNOD", @@ -72,23 +72,23 @@ }, { "key": "AUDIT_WRITE", - "enabled": true + "enabled": false }, { "key": "AUDIT_CONTROL", - "enabled": true + "enabled": false }, { "key": "MAC_OVERRIDE", - "enabled": true + "enabled": false }, { "key": "MAC_ADMIN", - "enabled": true + "enabled": false }, { "key": "NET_ADMIN", - "enabled": true + "enabled": false } ], "networks": [{ diff --git a/pkg/libcontainer/container_test.go b/pkg/libcontainer/container_test.go index 06e7979b0a..c413c7c34a 100644 --- a/pkg/libcontainer/container_test.go +++ b/pkg/libcontainer/container_test.go @@ -15,8 +15,7 @@ func TestContainerJsonFormat(t *testing.T) { var container *Container if err := json.NewDecoder(f).Decode(&container); err != nil { - t.Log("failed to decode container config") - t.FailNow() + t.Fatal("failed to decode container config") } if container.Hostname != "koye" { t.Log("hostname is not set") @@ -39,12 +38,22 @@ func TestContainerJsonFormat(t *testing.T) { } if !container.CapabilitiesMask.Contains("SYS_ADMIN") { - t.Log("capabilities should contain SYS_ADMIN") + t.Log("capabilities mask should contain SYS_ADMIN") + t.Fail() + } + + if container.CapabilitiesMask.Get("SYS_ADMIN").Enabled { + t.Log("SYS_ADMIN should not be enabled in 
capabilities mask") + t.Fail() + } + + if !container.CapabilitiesMask.Get("MKNOD").Enabled { + t.Log("MKNOD should be enabled in capabilities mask") t.Fail() } if container.CapabilitiesMask.Contains("SYS_CHROOT") { - t.Log("capabitlies should not contain SYS_CHROOT") + t.Log("capabilities mask should not contain SYS_CHROOT") t.Fail() } From c7bd1f4e648c6615eafebbfcaeb1c30020fc0aba Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Fri, 25 Apr 2014 20:36:31 +1000 Subject: [PATCH 051/219] small api doc formatting fixup Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- .../reference/api/docker_remote_api.md | 144 +++++++++--------- 1 file changed, 72 insertions(+), 72 deletions(-) diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 3c58b1b990..a6aafbeee8 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -127,83 +127,83 @@ entry for each repo/tag on an image, each image is only represented once, with a nested attribute indicating the repo/tags that apply to that image. - Instead of: +Instead of: - HTTP/1.1 200 OK - Content-Type: application/json + HTTP/1.1 200 OK + Content-Type: application/json - [ - { - "VirtualSize": 131506275, - "Size": 131506275, - "Created": 1365714795, - "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", - "Tag": "12.04", - "Repository": "ubuntu" - }, - { - "VirtualSize": 131506275, - "Size": 131506275, - "Created": 1365714795, - "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", - "Tag": "latest", - "Repository": "ubuntu" - }, - { - "VirtualSize": 131506275, - "Size": 131506275, - "Created": 1365714795, - "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", - "Tag": "precise", - "Repository": "ubuntu" - }, - { - "VirtualSize": 180116135, - "Size": 24653, - "Created": 1364102658, - "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Tag": "12.10", - "Repository": "ubuntu" - }, - { - "VirtualSize": 180116135, - "Size": 24653, - "Created": 1364102658, - "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Tag": "quantal", - "Repository": "ubuntu" - } - ] + [ + { + "VirtualSize": 131506275, + "Size": 131506275, + "Created": 1365714795, + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Tag": "12.04", + "Repository": "ubuntu" + }, + { + "VirtualSize": 131506275, + "Size": 131506275, + "Created": 1365714795, + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Tag": "latest", + "Repository": "ubuntu" + }, + { + "VirtualSize": 131506275, + "Size": 131506275, + "Created": 1365714795, + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Tag": "precise", + "Repository": "ubuntu" + }, + { + "VirtualSize": 180116135, + "Size": 24653, + "Created": 1364102658, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Tag": "12.10", + "Repository": "ubuntu" + }, + { + "VirtualSize": 180116135, + "Size": 24653, + "Created": 1364102658, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Tag": "quantal", + "Repository": "ubuntu" + } + ] - The returned json looks like this: +The returned json looks like this: - HTTP/1.1 200 OK - Content-Type: application/json + HTTP/1.1 200 OK + Content-Type: application/json - [ - { - "RepoTags": [ - "ubuntu:12.04", - "ubuntu:precise", - "ubuntu:latest" - ], - 
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", - "Created": 1365714795, - "Size": 131506275, - "VirtualSize": 131506275 - }, - { - "RepoTags": [ - "ubuntu:12.10", - "ubuntu:quantal" - ], - "ParentId": "27cf784147099545", - "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Created": 1364102658, - "Size": 24653, - "VirtualSize": 180116135 - } - ] + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] `GET /images/viz` From 8cdb720d26197e448587a21894069ee8a20e8aa0 Mon Sep 17 00:00:00 2001 From: Rohit Jnagal Date: Fri, 25 Apr 2014 21:10:23 +0000 Subject: [PATCH 052/219] Updated sample config to be usable. We should change the namespace config to not need "value" later. Docker-DCO-1.1-Signed-off-by: Rohit Jnagal (github: rjnagal) --- pkg/libcontainer/container.json | 48 ++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/pkg/libcontainer/container.json b/pkg/libcontainer/container.json index 68f9504f99..7c69a180fe 100644 --- a/pkg/libcontainer/container.json +++ b/pkg/libcontainer/container.json @@ -9,24 +9,34 @@ ], "namespaces": [ { - "key": "NEWIPC", - "enabled": true + "file": "ipc", + "value": 134217728, + "enabled": true, + "key": "NEWIPC" }, { - "key": "NEWNS", - "enabled": true + "file": "mnt", + "value": 131072, + "enabled": true, + "key": "NEWNS" }, { - "key": "NEWPID", - "enabled": true + "file": "pid", + "value": 536870912, + "enabled": true, + "key": "NEWPID" }, { - "key": "NEWUTS", - "enabled": true + "file": "uts", + "value": 67108864, + "enabled": true, + "key": "NEWUTS" }, { - "key": "NEWNET", - "enabled": true + "file": "net", + "value": 1073741824, + "enabled": true, + "key": "NEWNET" } ], "capabilities_mask": [ @@ -91,14 +101,21 @@ "enabled": false } ], - "networks": [{ + "networks": [ + { + "type": "loopback", + "gateway": "localhost", + "address": "127.0.0.1/0", + "mtu": 1500 + }, + { "type": "veth", + "gateway": "172.17.42.1", + "address": "172.17.0.4/16", "context": { - "bridge": "docker0", - "prefix": "dock" + "prefix": "dock", + "bridge": "docker0" }, - "address": "172.17.0.100/16", - "gateway": "172.17.42.1", "mtu": 1500 } ], @@ -106,6 +123,7 @@ "name": "docker-koye", "parent": "docker", "memory": 5248000, + "memory_swap": -1, "cpu_shares": 1024 } } From 7790a77b6afeda6f223202288a89f4b431f033a0 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 25 Apr 2014 20:01:25 -0400 Subject: [PATCH 053/219] static_registry: update the test for the new struct Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- registry/registry_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/registry/registry_test.go b/registry/registry_test.go index f21814c791..f53345c1fd 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -22,11 +22,11 @@ func spawnTestRegistry(t *testing.T) *Registry { } func TestPingRegistryEndpoint(t *testing.T) { - standalone, err := pingRegistryEndpoint(makeURL("/v1/")) + regInfo, err := pingRegistryEndpoint(makeURL("/v1/")) if err != nil { t.Fatal(err) } - assertEqual(t, standalone, 
true, "Expected standalone to be true (default)") + assertEqual(t, regInfo.Standalone, true, "Expected standalone to be true (default)") } func TestGetRemoteHistory(t *testing.T) { From 9422451ac3f541a17daba0d5f6dc8b40a6edc9e9 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Tue, 22 Apr 2014 16:51:06 -0700 Subject: [PATCH 054/219] engine.Installer: a standard interface for "installable" services Installer is a standard interface for objects which can "install" themselves an engine by registering handlers. This can be used as an entrypoint for external plugins etc. Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- engine/engine.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/engine/engine.go b/engine/engine.go index 58c37ab933..aaf5c1f595 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -10,6 +10,13 @@ import ( "strings" ) +// Installer is a standard interface for objects which can "install" themselves +// on an engine by registering handlers. +// This can be used as an entrypoint for external plugins etc. +type Installer interface { + Install(*Engine) error +} + type Handler func(*Job) Status var globalHandlers map[string]Handler From 68d3e757503fab422fc96a00d511336a3fdfd619 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Thu, 24 Apr 2014 00:36:21 -0700 Subject: [PATCH 055/219] engine: allow registering a "catchall" handler which receives all commands Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- engine/engine.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/engine/engine.go b/engine/engine.go index aaf5c1f595..dc1984ccb5 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -43,6 +43,7 @@ func unregister(name string) { // containers by executing *jobs*. type Engine struct { handlers map[string]Handler + catchall Handler hack Hack // data for temporary hackery (see hack.go) id string Stdout io.Writer @@ -60,6 +61,10 @@ func (eng *Engine) Register(name string, handler Handler) error { return nil } +func (eng *Engine) RegisterCatchall(catchall Handler) { + eng.catchall = catchall +} + // New initializes a new engine. func New() *Engine { eng := &Engine{ @@ -113,9 +118,13 @@ func (eng *Engine) Job(name string, args ...string) *Job { if eng.Logging { job.Stderr.Add(utils.NopWriteCloser(eng.Stderr)) } - handler, exists := eng.handlers[name] - if exists { - job.handler = handler + if eng.catchall != nil { + job.handler = eng.catchall + } else { + handler, exists := eng.handlers[name] + if exists { + job.handler = handler + } } return job } From 3c1d5ca33ecbd644d4e8d864ff59b389f4a4a555 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Thu, 24 Apr 2014 00:46:32 -0700 Subject: [PATCH 056/219] Remote communication between engines using beam Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- engine/remote.go | 109 ++++++++++++++++++++++++++++++++++++++++++ engine/remote_test.go | 3 ++ 2 files changed, 112 insertions(+) create mode 100644 engine/remote.go create mode 100644 engine/remote_test.go diff --git a/engine/remote.go b/engine/remote.go new file mode 100644 index 0000000000..1e8777a4b7 --- /dev/null +++ b/engine/remote.go @@ -0,0 +1,109 @@ +package engine + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/beam" + "github.com/dotcloud/docker/pkg/beam/data" + "io" + "os" + "strconv" + "sync" +) + +type Sender struct { + beam.Sender +} + +func NewSender(s beam.Sender) *Sender { + return &Sender{s} +} + +func (s *Sender) Install(eng *Engine) error { + // FIXME: this doesn't exist yet. 
+ eng.RegisterCatchall(s.Handle) + return nil +} + +func (s *Sender) Handle(job *Job) Status { + msg := data.Empty().Set("cmd", append([]string{job.Name}, job.Args...)...) + peer, err := beam.SendConn(s, msg.Bytes()) + if err != nil { + return job.Errorf("beamsend: %v", err) + } + defer peer.Close() + var tasks sync.WaitGroup + defer tasks.Wait() + r := beam.NewRouter(nil) + r.NewRoute().KeyStartsWith("cmd", "log", "stdout").HasAttachment().Handler(func(p []byte, stdout *os.File) error { + tasks.Add(1) + io.Copy(job.Stdout, stdout) + tasks.Done() + return nil + }) + r.NewRoute().KeyStartsWith("cmd", "log", "stderr").HasAttachment().Handler(func(p []byte, stderr *os.File) error { + tasks.Add(1) + io.Copy(job.Stderr, stderr) + tasks.Done() + return nil + }) + var status int + r.NewRoute().KeyStartsWith("cmd", "status").Handler(func(p []byte, f *os.File) error { + cmd := data.Message(p).Get("cmd") + if len(cmd) != 3 { + return fmt.Errorf("usage: %s <0-127>", cmd[0]) + } + s, err := strconv.ParseUint(cmd[2], 10, 8) + if err != nil { + return fmt.Errorf("usage: %s <0-127>", cmd[0]) + } + status = int(s) + return nil + + }) + if _, err := beam.Copy(r, peer); err != nil { + return job.Errorf("%v", err) + } + return Status(status) +} + +type Receiver struct { + *Engine + peer beam.Receiver +} + +func NewReceiver(peer beam.Receiver) *Receiver { + return &Receiver{Engine: New(), peer: peer} +} + +func (rcv *Receiver) Run() error { + r := beam.NewRouter(nil) + r.NewRoute().KeyExists("cmd").Handler(func(p []byte, f *os.File) error { + // Use the attachment as a beam return channel + peer, err := beam.FileConn(f) + if err != nil { + f.Close() + return err + } + cmd := data.Message(p).Get("cmd") + job := rcv.Engine.Job(cmd[0], cmd[1:]...) + stdout, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stdout").Bytes()) + if err != nil { + return err + } + job.Stdout.Add(stdout) + stderr, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stderr").Bytes()) + if err != nil { + return err + } + job.Stderr.Add(stderr) + // ignore error because we pass the raw status + job.Run() + err = peer.Send(data.Empty().Set("cmd", "status", fmt.Sprintf("%d", job.status)).Bytes(), nil) + if err != nil { + return err + } + return nil + }) + _, err := beam.Copy(r, rcv.peer) + return err +} diff --git a/engine/remote_test.go b/engine/remote_test.go new file mode 100644 index 0000000000..54092ec934 --- /dev/null +++ b/engine/remote_test.go @@ -0,0 +1,3 @@ +package engine + +import () From b63b98ee2766321e2ca6f3b159c2bfb303870105 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Fri, 25 Apr 2014 16:47:03 -0700 Subject: [PATCH 057/219] engine.Sender and engine.Receiver support stdin Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- engine/remote.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/engine/remote.go b/engine/remote.go index 1e8777a4b7..48638e4383 100644 --- a/engine/remote.go +++ b/engine/remote.go @@ -46,6 +46,12 @@ func (s *Sender) Handle(job *Job) Status { tasks.Done() return nil }) + r.NewRoute().KeyStartsWith("cmd", "log", "stdin").HasAttachment().Handler(func(p []byte, stdin *os.File) error { + tasks.Add(1) + io.Copy(stdin, job.Stdin) + tasks.Done() + return nil + }) var status int r.NewRoute().KeyStartsWith("cmd", "status").Handler(func(p []byte, f *os.File) error { cmd := data.Message(p).Get("cmd") @@ -96,6 +102,11 @@ func (rcv *Receiver) Run() error { return err } job.Stderr.Add(stderr) + stdin, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", 
"stdin").Bytes()) + if err != nil { + return err + } + job.Stdin.Add(stdin) // ignore error because we pass the raw status job.Run() err = peer.Send(data.Empty().Set("cmd", "status", fmt.Sprintf("%d", job.status)).Bytes(), nil) From 7e3624a498b8b96a4e8a0f1d59fc2c50bf48efb3 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Fri, 25 Apr 2014 16:48:16 -0700 Subject: [PATCH 058/219] engine: 'rengine' is a small command-line utility to debug remote engine Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- engine/rengine/main.go | 43 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 engine/rengine/main.go diff --git a/engine/rengine/main.go b/engine/rengine/main.go new file mode 100644 index 0000000000..b4fa01d39c --- /dev/null +++ b/engine/rengine/main.go @@ -0,0 +1,43 @@ +package main + +import ( + "fmt" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/pkg/beam" + "net" + "os" +) + +func main() { + eng := engine.New() + + c, err := net.Dial("unix", "beam.sock") + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return + } + defer c.Close() + f, err := c.(*net.UnixConn).File() + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return + } + + child, err := beam.FileConn(f) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return + } + defer child.Close() + + sender := engine.NewSender(child) + sender.Install(eng) + + cmd := eng.Job(os.Args[1], os.Args[2:]...) + cmd.Stdout.Add(os.Stdout) + cmd.Stderr.Add(os.Stderr) + if err := cmd.Run(); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } +} From f4055ee2a467458d4848ad6f8e79fc0162d377f9 Mon Sep 17 00:00:00 2001 From: Victor Marmol Date: Fri, 25 Apr 2014 17:44:40 +0000 Subject: [PATCH 059/219] Adding a test for blkio stats. Also adds a test utility we can use for other cgroup tests. Docker-DCO-1.1-Signed-off-by: Victor Marmol (github: vmarmol) --- pkg/cgroups/fs/blkio_test.go | 169 +++++++++++++++++++++++++++++++++++ pkg/cgroups/fs/test_util.go | 75 ++++++++++++++++ 2 files changed, 244 insertions(+) create mode 100644 pkg/cgroups/fs/blkio_test.go create mode 100644 pkg/cgroups/fs/test_util.go diff --git a/pkg/cgroups/fs/blkio_test.go b/pkg/cgroups/fs/blkio_test.go new file mode 100644 index 0000000000..5279ac437b --- /dev/null +++ b/pkg/cgroups/fs/blkio_test.go @@ -0,0 +1,169 @@ +package fs + +import ( + "testing" +) + +const ( + sectorsRecursiveContents = `8:0 1024` + serviceBytesRecursiveContents = `8:0 Read 100 +8:0 Write 400 +8:0 Sync 200 +8:0 Async 300 +8:0 Total 500 +Total 500` + servicedRecursiveContents = `8:0 Read 10 +8:0 Write 40 +8:0 Sync 20 +8:0 Async 30 +8:0 Total 50 +Total 50` + queuedRecursiveContents = `8:0 Read 1 +8:0 Write 4 +8:0 Sync 2 +8:0 Async 3 +8:0 Total 5 +Total 5` +) + +func TestBlkioStats(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &blkioGroup{} + stats, err := blkio.Stats(helper.CgroupData) + if err != nil { + t.Fatal(err) + } + + // Verify expected stats. + expectedStats := map[string]float64{ + "blkio.sectors_recursive:8:0": 1024.0, + + // Serviced bytes. 
+ "io_service_bytes_recursive:8:0:Read": 100.0, + "io_service_bytes_recursive:8:0:Write": 400.0, + "io_service_bytes_recursive:8:0:Sync": 200.0, + "io_service_bytes_recursive:8:0:Async": 300.0, + "io_service_bytes_recursive:8:0:Total": 500.0, + + // Serviced requests. + "io_serviced_recursive:8:0:Read": 10.0, + "io_serviced_recursive:8:0:Write": 40.0, + "io_serviced_recursive:8:0:Sync": 20.0, + "io_serviced_recursive:8:0:Async": 30.0, + "io_serviced_recursive:8:0:Total": 50.0, + + // Queued requests. + "io_queued_recursive:8:0:Read": 1.0, + "io_queued_recursive:8:0:Write": 4.0, + "io_queued_recursive:8:0:Sync": 2.0, + "io_queued_recursive:8:0:Async": 3.0, + "io_queued_recursive:8:0:Total": 5.0, + } + expectStats(t, expectedStats, stats) +} + +func TestBlkioStatsNoSectorsFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + }) + + blkio := &blkioGroup{} + _, err := blkio.Stats(helper.CgroupData) + if err == nil { + t.Fatal("Expected to fail, but did not") + } +} + +func TestBlkioStatsNoServiceBytesFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &blkioGroup{} + _, err := blkio.Stats(helper.CgroupData) + if err == nil { + t.Fatal("Expected to fail, but did not") + } +} + +func TestBlkioStatsNoServicedFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &blkioGroup{} + _, err := blkio.Stats(helper.CgroupData) + if err == nil { + t.Fatal("Expected to fail, but did not") + } +} + +func TestBlkioStatsNoQueuedFile(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": serviceBytesRecursiveContents, + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &blkioGroup{} + _, err := blkio.Stats(helper.CgroupData) + if err == nil { + t.Fatal("Expected to fail, but did not") + } +} + +func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": "8:0 Read 100 100", + "blkio.io_serviced_recursive": servicedRecursiveContents, + "blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &blkioGroup{} + _, err := blkio.Stats(helper.CgroupData) + if err == nil { + t.Fatal("Expected to fail, but did not") + } +} + +func TestBlkioStatsUnexpectedFieldType(t *testing.T) { + helper := NewCgroupTestUtil("blkio", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "blkio.io_service_bytes_recursive": "8:0 Read Write", + "blkio.io_serviced_recursive": servicedRecursiveContents, + 
"blkio.io_queued_recursive": queuedRecursiveContents, + "blkio.sectors_recursive": sectorsRecursiveContents, + }) + + blkio := &blkioGroup{} + _, err := blkio.Stats(helper.CgroupData) + if err == nil { + t.Fatal("Expected to fail, but did not") + } +} diff --git a/pkg/cgroups/fs/test_util.go b/pkg/cgroups/fs/test_util.go new file mode 100644 index 0000000000..11b90b21d6 --- /dev/null +++ b/pkg/cgroups/fs/test_util.go @@ -0,0 +1,75 @@ +/* +Utility for testing cgroup operations. + +Creates a mock of the cgroup filesystem for the duration of the test. +*/ +package fs + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "testing" +) + +type cgroupTestUtil struct { + // data to use in tests. + CgroupData *data + + // Path to the mock cgroup directory. + CgroupPath string + + // Temporary directory to store mock cgroup filesystem. + tempDir string + t *testing.T +} + +// Creates a new test util for the specified subsystem +func NewCgroupTestUtil(subsystem string, t *testing.T) *cgroupTestUtil { + d := &data{} + tempDir, err := ioutil.TempDir("", fmt.Sprintf("%s_cgroup_test", subsystem)) + if err != nil { + t.Fatal(err) + } + d.root = tempDir + testCgroupPath, err := d.path(subsystem) + if err != nil { + t.Fatal(err) + } + + // Ensure the full mock cgroup path exists. + err = os.MkdirAll(testCgroupPath, 0755) + if err != nil { + t.Fatal(err) + } + return &cgroupTestUtil{CgroupData: d, CgroupPath: testCgroupPath, tempDir: tempDir, t: t} +} + +func (c *cgroupTestUtil) cleanup() { + os.RemoveAll(c.tempDir) +} + +// Write the specified contents on the mock of the specified cgroup files. +func (c *cgroupTestUtil) writeFileContents(fileContents map[string]string) { + for file, contents := range fileContents { + err := writeFile(c.CgroupPath, file, contents) + if err != nil { + c.t.Fatal(err) + } + } +} + +// Expect the specified stats. +func expectStats(t *testing.T, expected, actual map[string]float64) { + for stat, expectedValue := range expected { + actualValue, ok := actual[stat] + if !ok { + log.Printf("Expected stat %s to exist: %s", stat, actual) + t.Fail() + } else if actualValue != expectedValue { + log.Printf("Expected stats %s to have value %f but had %f instead", stat, expectedValue, actualValue) + t.Fail() + } + } +} From 7744f63159cee9cfa5d9e9307e5c29a4f589837f Mon Sep 17 00:00:00 2001 From: James Turnbull Date: Sat, 26 Apr 2014 08:10:19 -0400 Subject: [PATCH 060/219] Updated Docs README Docker-DCO-1.1-Signed-off-by: James Turnbull (github: jamtur01) --- docs/README.md | 74 ++++++++++++++++++++++++-------------------------- 1 file changed, 35 insertions(+), 39 deletions(-) diff --git a/docs/README.md b/docs/README.md index a113cb9edd..e5474d524d 100755 --- a/docs/README.md +++ b/docs/README.md @@ -7,64 +7,60 @@ Overview The source for Docker documentation is here under ``sources/`` and uses extended Markdown, as implemented by [mkdocs](http://mkdocs.org). -The HTML files are built and hosted on https://docs.docker.io, and update -automatically after each change to the master or release branch of the -[docker files on GitHub](https://github.com/dotcloud/docker) thanks to -post-commit hooks. The "release" branch maps to the "latest" -documentation and the "master" (unreleased development) branch maps to the "master" -documentation. +The HTML files are built and hosted on `https://docs.docker.io`, and +update automatically after each change to the master or release branch +of [Docker on GitHub](https://github.com/dotcloud/docker) +thanks to post-commit hooks. 
The "docs" branch maps to the "latest" +documentation and the "master" (unreleased development) branch maps to +the "master" documentation. ## Branches **There are two branches related to editing docs**: ``master`` and a -``docs`` branch. You should always edit -docs on a local branch of the ``master`` branch, and send a PR against ``master``. -That way your fixes -will automatically get included in later releases, and docs maintainers -can easily cherry-pick your changes into the ``docs`` release branch. -In the rare case where your change is not forward-compatible, -you may need to base your changes on the ``docs`` branch. +``docs`` branch. You should always edit documentation on a local branch +of the ``master`` branch, and send a PR against ``master``. -Now that we have a ``docs`` branch, we can keep the [http://docs.docker.io](http://docs.docker.io) docs -up to date with any bugs found between ``docker`` code releases. +That way your fixes will automatically get included in later releases, +and docs maintainers can easily cherry-pick your changes into the +``docs`` release branch. In the rare case where your change is not +forward-compatible, you may need to base your changes on the ``docs`` +branch. -**Warning**: When *reading* the docs, the [http://beta-docs.docker.io](http://beta-docs.docker.io) documentation may -include features not yet part of any official docker -release. The ``beta-docs`` site should be used only for understanding -bleeding-edge development and ``docs.docker.io`` (which points to the ``docs`` +Also, now that we have a ``docs`` branch, we can keep the +[http://docs.docker.io](http://docs.docker.io) docs up to date with any +bugs found between ``docker`` code releases. + +**Warning**: When *reading* the docs, the +[http://beta-docs.docker.io](http://beta-docs.docker.io) documentation +may include features not yet part of any official docker release. The +``beta-docs`` site should be used only for understanding bleeding-edge +development and ``docs.docker.io`` (which points to the ``docs`` branch``) should be used for the latest official release. Getting Started --------------- -Docker documentation builds are done in a docker container, which installs all -the required tools, adds the local ``docs/`` directory and builds the HTML -docs. It then starts a HTTP server on port 8000 so that you can connect -and see your changes. +Docker documentation builds are done in a Docker container, which +installs all the required tools, adds the local ``docs/`` directory and +builds the HTML docs. It then starts a HTTP server on port 8000 so that +you can connect and see your changes. -In the ``docker`` source directory, run: - ```make docs``` +In the root of the ``docker`` source directory: + + cd docker + +Run: + + make docs If you have any issues you need to debug, you can use ``make docs-shell`` and then run ``mkdocs serve`` # Contributing -## Normal Case: - * Follow the contribution guidelines ([see ``../CONTRIBUTING.md``](../CONTRIBUTING.md)). * [Remember to sign your work!](../CONTRIBUTING.md#sign-your-work) -* Work in your own fork of the code, we accept pull requests. -* Change the ``.md`` files with your favorite editor -- try to keep the - lines short (80 chars) and respect Markdown conventions. -* Run ``make clean docs`` to clean up old files and generate new ones, - or just ``make docs`` to update after small changes. -* Your static website can now be found in the ``_build`` directory. 
-* To preview what you have generated run ``make server`` and open - http://localhost:8000/ in your favorite browser. - -``make clean docs`` must complete without any warnings or errors. Working using GitHub's file editor ---------------------------------- @@ -87,7 +83,7 @@ Publishing Documentation ------------------------ To publish a copy of the documentation you need a ``docs/awsconfig`` -file containing AWS settings to deploy to. The release script will +file containing AWS settings to deploy to. The release script will create an s3 if needed, and will then push the files to it. ``` @@ -97,7 +93,7 @@ aws_secret_access_key = OIUYSADJHLKUHQWIUHE...... region = ap-southeast-2 ``` -The ``profile`` name must be the same as the name of the bucket you are +The ``profile`` name must be the same as the name of the bucket you are deploying to - which you call from the docker directory: ``make AWS_S3_BUCKET=dowideit-docs docs-release`` From df018bc8012ad4181ac16e1329c970fe213f1f87 Mon Sep 17 00:00:00 2001 From: James Turnbull Date: Sat, 26 Apr 2014 09:48:07 -0400 Subject: [PATCH 061/219] Fixed Dockerise to Dockerize Docker-DCO-1.1-Signed-off-by: James Turnbull (github: jamtur01) --- docs/sources/introduction/understanding-docker.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/introduction/understanding-docker.md b/docs/sources/introduction/understanding-docker.md index 1c979d5810..5920da5bca 100644 --- a/docs/sources/introduction/understanding-docker.md +++ b/docs/sources/introduction/understanding-docker.md @@ -87,14 +87,14 @@ to you. *Docker is made for humans.* It's easy to get started and easy to build and deploy applications with -Docker: or as we say "*dockerise*" them! As much of Docker as possible +Docker: or as we say "*dockerize*" them! As much of Docker as possible uses plain English for commands and tries to be as lightweight and transparent as possible. We want to get out of the way so you can build and deploy your applications. ### Docker is Portable -*Dockerise And Go!* +*Dockerize And Go!* Docker containers are highly portable. Docker provides a standard container format to hold your applications: From 723d314f097fe7b1e15f27c1d6f05794b6c5f411 Mon Sep 17 00:00:00 2001 From: James Turnbull Date: Sat, 26 Apr 2014 09:53:17 -0400 Subject: [PATCH 062/219] Replaced all double backticks in README with singles Docker-DCO-1.1-Signed-off-by: James Turnbull (github: jamtur01) --- docs/README.md | 56 ++++++++++++++++++++++++-------------------------- 1 file changed, 27 insertions(+), 29 deletions(-) diff --git a/docs/README.md b/docs/README.md index e5474d524d..bbc741d593 100755 --- a/docs/README.md +++ b/docs/README.md @@ -4,7 +4,7 @@ Docker Documentation Overview -------- -The source for Docker documentation is here under ``sources/`` and uses +The source for Docker documentation is here under `sources/` and uses extended Markdown, as implemented by [mkdocs](http://mkdocs.org). The HTML files are built and hosted on `https://docs.docker.io`, and @@ -16,36 +16,36 @@ the "master" documentation. ## Branches -**There are two branches related to editing docs**: ``master`` and a -``docs`` branch. You should always edit documentation on a local branch -of the ``master`` branch, and send a PR against ``master``. +**There are two branches related to editing docs**: `master` and a +`docs` branch. You should always edit documentation on a local branch +of the `master` branch, and send a PR against `master`. 
That way your fixes will automatically get included in later releases, and docs maintainers can easily cherry-pick your changes into the -``docs`` release branch. In the rare case where your change is not -forward-compatible, you may need to base your changes on the ``docs`` +`docs` release branch. In the rare case where your change is not +forward-compatible, you may need to base your changes on the `docs` branch. -Also, now that we have a ``docs`` branch, we can keep the +Also, now that we have a `docs` branch, we can keep the [http://docs.docker.io](http://docs.docker.io) docs up to date with any -bugs found between ``docker`` code releases. +bugs found between `docker` code releases. **Warning**: When *reading* the docs, the [http://beta-docs.docker.io](http://beta-docs.docker.io) documentation may include features not yet part of any official docker release. The -``beta-docs`` site should be used only for understanding bleeding-edge -development and ``docs.docker.io`` (which points to the ``docs`` -branch``) should be used for the latest official release. +`beta-docs` site should be used only for understanding bleeding-edge +development and `docs.docker.io` (which points to the `docs` +branch`) should be used for the latest official release. Getting Started --------------- Docker documentation builds are done in a Docker container, which -installs all the required tools, adds the local ``docs/`` directory and +installs all the required tools, adds the local `docs/` directory and builds the HTML docs. It then starts a HTTP server on port 8000 so that you can connect and see your changes. -In the root of the ``docker`` source directory: +In the root of the `docker` source directory: cd docker @@ -53,13 +53,13 @@ Run: make docs -If you have any issues you need to debug, you can use ``make docs-shell`` and -then run ``mkdocs serve`` +If you have any issues you need to debug, you can use `make docs-shell` and +then run `mkdocs serve` # Contributing * Follow the contribution guidelines ([see - ``../CONTRIBUTING.md``](../CONTRIBUTING.md)). + `../CONTRIBUTING.md`](../CONTRIBUTING.md)). * [Remember to sign your work!](../CONTRIBUTING.md#sign-your-work) Working using GitHub's file editor @@ -67,7 +67,7 @@ Working using GitHub's file editor Alternatively, for small changes and typos you might want to use GitHub's built in file editor. It allows you to preview your changes -right online (though there can be some differences between GitHub +right on-line (though there can be some differences between GitHub Markdown and mkdocs Markdown). Just be careful not to create many commits. And you must still [sign your work!](../CONTRIBUTING.md#sign-your-work) @@ -75,26 +75,24 @@ Images ------ When you need to add images, try to make them as small as possible -(e.g. as gif). Usually images should go in the same directory as the -.md file which references them, or in a subdirectory if one already +(e.g. as gifs). Usually images should go in the same directory as the +`.md` file which references them, or in a subdirectory if one already exists. Publishing Documentation ------------------------ -To publish a copy of the documentation you need a ``docs/awsconfig`` +To publish a copy of the documentation you need a `docs/awsconfig` file containing AWS settings to deploy to. The release script will create an s3 if needed, and will then push the files to it. -``` -[profile dowideit-docs] -aws_access_key_id = IHOIUAHSIDH234rwf.... -aws_secret_access_key = OIUYSADJHLKUHQWIUHE...... 
-region = ap-southeast-2 -``` + [profile dowideit-docs] + aws_access_key_id = IHOIUAHSIDH234rwf.... + aws_secret_access_key = OIUYSADJHLKUHQWIUHE...... + region = ap-southeast-2 -The ``profile`` name must be the same as the name of the bucket you are -deploying to - which you call from the docker directory: +The `profile` name must be the same as the name of the bucket you are +deploying to - which you call from the `docker` directory: -``make AWS_S3_BUCKET=dowideit-docs docs-release`` + make AWS_S3_BUCKET=dowideit-docs docs-release From e83fc70d362bf0035811bbcc5566020c6cc9893f Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Fri, 25 Apr 2014 13:21:28 -0700 Subject: [PATCH 063/219] Freeze ./integration and explain where to contribute new tests Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- integration/MAINTAINERS | 4 ++++ integration/README.md | 23 +++++++++++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 integration/MAINTAINERS create mode 100644 integration/README.md diff --git a/integration/MAINTAINERS b/integration/MAINTAINERS new file mode 100644 index 0000000000..2d47d7a711 --- /dev/null +++ b/integration/MAINTAINERS @@ -0,0 +1,4 @@ +Solomon Hykes +# WE ARE LOOKING FOR VOLUNTEERS TO HELP CLEAN THIS UP. +# TO VOLUNTEER PLEASE OPEN A PULL REQUEST ADDING YOURSELF TO THIS FILE. +# WE WILL HELP YOU GET STARTED. THANKS! diff --git a/integration/README.md b/integration/README.md new file mode 100644 index 0000000000..41f43a4ba7 --- /dev/null +++ b/integration/README.md @@ -0,0 +1,23 @@ +## Legacy integration tests + +`./integration` contains Docker's legacy integration tests. +It is DEPRECATED and will eventually be removed. + +### If you are a *CONTRIBUTOR* and want to add a test: + +* Consider mocking out side effects and contributing a *unit test* in the subsystem +you're modifying. For example, the remote API has unit tests in `./api/server/server_unit_tests.go`. +The events subsystem has unit tests in `./events/events_test.go`. And so on. + +* For end-to-end integration tests, please contribute to `./integration-cli`. + + +### If you are a *MAINTAINER* + +Please don't allow patches adding new tests to `./integration`. + +### If you are *LOOKING FOR A WAY TO HELP* + +Please consider porting tests away from `./integration` and into either unit tests or CLI tests. + +Any help will be greatly appreciated! 
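The README added above points contributors toward unit tests that mock out side effects rather than new entries in `./integration`. As a rough illustration only — not part of this patch series, and using hypothetical names (`store`, `fakeStore`, `resolve`) rather than any real Docker API — such a subsystem unit test might look like this:

```go
package subsystem

import "testing"

// store is the minimal interface the code under test depends on; a fake
// implementation lets the test run without touching a real backend.
type store interface {
	Get(key string) (string, bool)
}

// fakeStore is an in-memory stand-in used to mock out side effects.
type fakeStore map[string]string

func (f fakeStore) Get(key string) (string, bool) {
	v, ok := f[key]
	return v, ok
}

// resolve is the hypothetical function under test: look up a name and
// fall back to a default when it is not present in the store.
func resolve(s store, name, def string) string {
	if v, ok := s.Get(name); ok {
		return v
	}
	return def
}

func TestResolveFallsBackToDefault(t *testing.T) {
	s := fakeStore{"known": "value"}

	if got := resolve(s, "known", "default"); got != "value" {
		t.Fatalf("expected %q, got %q", "value", got)
	}
	if got := resolve(s, "missing", "default"); got != "default" {
		t.Fatalf("expected %q, got %q", "default", got)
	}
}
```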
From 91deb591c8a52a974612b4c6885d989251f819bd Mon Sep 17 00:00:00 2001 From: Soulou Date: Tue, 11 Mar 2014 09:30:51 +0000 Subject: [PATCH 064/219] [Documentation - API] Add missing 'signal' parameter for /containers/:id/kill endpoint Docker-DCO-1.1-Signed-off-by: Leo Unbekandt (github: Soulou) --- .../sources/reference/api/archive/docker_remote_api_v1.7.rst | 1 + .../sources/reference/api/archive/docker_remote_api_v1.8.rst | 1 + docs/sources/reference/api/docker_remote_api_v1.10.md | 5 +++++ docs/sources/reference/api/docker_remote_api_v1.10.rst | 1 + docs/sources/reference/api/docker_remote_api_v1.11.md | 5 +++++ docs/sources/reference/api/docker_remote_api_v1.11.rst | 1 + docs/sources/reference/api/docker_remote_api_v1.9.md | 5 +++++ docs/sources/reference/api/docker_remote_api_v1.9.rst | 1 + 8 files changed, 20 insertions(+) diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.7.rst b/docs/sources/reference/api/archive/docker_remote_api_v1.7.rst index 1bafaddfc5..7a4f688d8f 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.7.rst +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.7.rst @@ -454,6 +454,7 @@ Kill a container HTTP/1.1 204 OK + :query signal: Signal to send to the container (integer). When not set, SIGKILL is assumed and the call will waits for the container to exit. :statuscode 204: no error :statuscode 404: no such container :statuscode 500: server error diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.8.rst b/docs/sources/reference/api/archive/docker_remote_api_v1.8.rst index 16492dde76..4f1b266bb6 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.8.rst +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.8.rst @@ -482,6 +482,7 @@ Kill a container HTTP/1.1 204 OK + :query signal: Signal to send to the container (integer). When not set, SIGKILL is assumed and the call will waits for the container to exit. :statuscode 204: no error :statuscode 404: no such container :statuscode 500: server error diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md index c07f96f384..749ff8e383 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.md +++ b/docs/sources/reference/api/docker_remote_api_v1.10.md @@ -459,6 +459,11 @@ Kill the container `id` HTTP/1.1 204 OK + Query Parameters + + - **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will waits for the container to exit. + Status Codes: - **204** – no error diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.rst b/docs/sources/reference/api/docker_remote_api_v1.10.rst index 83e2c3c15b..8635ec4826 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.10.rst @@ -466,6 +466,7 @@ Kill a container HTTP/1.1 204 OK + :query signal: Signal to send to the container, must be integer or string (i.e. SIGINT). When not set, SIGKILL is assumed and the call will waits for the container to exit. 
:statuscode 204: no error :statuscode 404: no such container :statuscode 500: server error diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index 5e3fdcb0a8..baabade455 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -461,6 +461,11 @@ Kill the container `id` HTTP/1.1 204 OK + Query Parameters + + - **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will waits for the container to exit. + Status Codes: - **204** – no error diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.rst b/docs/sources/reference/api/docker_remote_api_v1.11.rst index 556491c49a..d66b4b1410 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.11.rst @@ -468,6 +468,7 @@ Kill a container HTTP/1.1 204 OK + :query signal: Signal to send to the container, must be integer or string (i.e. SIGINT). When not set, SIGKILL is assumed and the call will waits for the container to exit. :statuscode 204: no error :statuscode 404: no such container :statuscode 500: server error diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md index 74e85a7ee6..1fe154a3da 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.md +++ b/docs/sources/reference/api/docker_remote_api_v1.9.md @@ -482,6 +482,11 @@ Kill the container `id` HTTP/1.1 204 OK + Query Parameters + + - **signal** - Signal to send to the container: integer or string like "SIGINT". + When not set, SIGKILL is assumed and the call will waits for the container to exit. + Status Codes: - **204** – no error diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.rst b/docs/sources/reference/api/docker_remote_api_v1.9.rst index 4bbffcbd36..db0e3bfdae 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.9.rst @@ -480,6 +480,7 @@ Kill a container HTTP/1.1 204 OK + :query signal: Signal to send to the container, must be integer or string (i.e. SIGINT). When not set, SIGKILL is assumed and the call will waits for the container to exit. :statuscode 204: no error :statuscode 404: no such container :statuscode 500: server error From c6060a3b25b94ae4738ac27bbf62cbb914af7ced Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Sun, 27 Apr 2014 11:17:48 -0700 Subject: [PATCH 065/219] Added back OAuth and Accounts API docs pages Removed a now unused endpoint from the accounts API. Updated some of the accounts links to point to www.docker.io as the account signup and resend-email-confirmation links should no longer point to the index. 
Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- docs/mkdocs.yml | 2 + docs/sources/index/accounts.md | 4 +- .../reference/api/docker_io_accounts_api.md | 73 +------------------ 3 files changed, 5 insertions(+), 74 deletions(-) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 3538642717..0b526e016b 100755 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -106,6 +106,8 @@ pages: - ['reference/api/docker_remote_api_v1.10.md', 'Reference', 'Docker Remote API v1.10'] - ['reference/api/docker_remote_api_v1.9.md', 'Reference', 'Docker Remote API v1.9'] - ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries'] +- ['reference/api/docker_io_oauth_api.md', 'Reference', 'Docker IO OAuth API'] +- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker IO Accounts API'] # Contribute: - ['contributing/index.md', '**HIDDEN**'] diff --git a/docs/sources/index/accounts.md b/docs/sources/index/accounts.md index c3138b61da..54d015ac2a 100644 --- a/docs/sources/index/accounts.md +++ b/docs/sources/index/accounts.md @@ -14,7 +14,7 @@ to need a [Docker IO](https://www.docker.io) account. ### Registration for a Docker IO Account You can get a Docker IO account by [signing up for one here]( -https://index.docker.io/account/signup/). A valid email address is required to +https://www.docker.io/account/signup/). A valid email address is required to register, which you will need to verify for account activation. ### Email activation process @@ -22,7 +22,7 @@ register, which you will need to verify for account activation. You need to have at least one verified email address to be able to use your Docker IO account. If you can't find the validation email, you can request another by visiting the [Resend Email Confirmation]( -https://index.docker.io/account/resend-email-confirmation/) page. +https://www.docker.io/account/resend-email-confirmation/) page. ### Password reset process diff --git a/docs/sources/reference/api/docker_io_accounts_api.md b/docs/sources/reference/api/docker_io_accounts_api.md index 8186e306f8..b9f76ba92c 100644 --- a/docs/sources/reference/api/docker_io_accounts_api.md +++ b/docs/sources/reference/api/docker_io_accounts_api.md @@ -237,78 +237,7 @@ automatically sent. "primary": false } -### 1.5 Update an email address for a user - -`PATCH /api/v1.1/users/:username/emails/` - -Update an email address for the specified user to either verify an -email address or set it as the primary email for the user. You -cannot use this endpoint to un-verify an email address. You cannot -use this endpoint to unset the primary email, only set another as -the primary. - - Parameters: - - - **username** – username of the user whose email info is being - updated. - - Json Parameters: - -   - - - **email** (*string*) – the email address to be updated. - - **verified** (*boolean*) – (optional) whether the email address - is verified, must be `true` or absent. - - **primary** (*boolean*) – (optional) whether to set the email - address as the primary email, must be `true` - or absent. - - Request Headers: - -   - - - **Authorization** – required authentication credentials of - either type HTTP Basic or OAuth Bearer Token. - - **Content-Type** – MIME Type of post data. JSON, url-encoded - form data, etc. - - Status Codes: - - - **200** – success, user's email updated. - - **400** – data validation error. - - **401** – authentication error. 
- - **403** – permission error, authenticated user must be the user - whose data is being updated, OAuth access tokens must have - `email_write` scope. - - **404** – the specified username or email address does not - exist. - - **Example request**: - - Once you have independently verified an email address. - - PATCH /api/v1.1/users/janedoe/emails/ HTTP/1.1 - Host: www.docker.io - Accept: application/json - Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ= - - { - "email": "jane.doe+other@example.com", - "verified": true, - } - - **Example response**: - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "email": "jane.doe+other@example.com", - "verified": true, - "primary": false - } - -### 1.6 Delete email address for a user +### 1.5 Delete email address for a user `DELETE /api/v1.1/users/:username/emails/` From 9236e088eb5a9a6d662b08ef7983dbecf01e6ef0 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Sat, 26 Apr 2014 18:24:39 -0700 Subject: [PATCH 066/219] Fix bug in engine.Sender Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- engine/remote.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/remote.go b/engine/remote.go index 48638e4383..60aad243c5 100644 --- a/engine/remote.go +++ b/engine/remote.go @@ -55,10 +55,10 @@ func (s *Sender) Handle(job *Job) Status { var status int r.NewRoute().KeyStartsWith("cmd", "status").Handler(func(p []byte, f *os.File) error { cmd := data.Message(p).Get("cmd") - if len(cmd) != 3 { + if len(cmd) != 2 { return fmt.Errorf("usage: %s <0-127>", cmd[0]) } - s, err := strconv.ParseUint(cmd[2], 10, 8) + s, err := strconv.ParseUint(cmd[1], 10, 8) if err != nil { return fmt.Errorf("usage: %s <0-127>", cmd[0]) } From b4b83ef8ae788cb7e016fbf90f0c1b890af7b23d Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Sat, 26 Apr 2014 18:47:20 -0700 Subject: [PATCH 067/219] engine/spawn: run an engine in a subprocess, remote-controlled by Beam Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- engine/spawn/spawn.go | 119 +++++++++++++++++++++++++++++++++ engine/spawn/subengine/main.go | 61 +++++++++++++++++ 2 files changed, 180 insertions(+) create mode 100644 engine/spawn/spawn.go create mode 100644 engine/spawn/subengine/main.go diff --git a/engine/spawn/spawn.go b/engine/spawn/spawn.go new file mode 100644 index 0000000000..6680845bc1 --- /dev/null +++ b/engine/spawn/spawn.go @@ -0,0 +1,119 @@ +package spawn + +import ( + "fmt" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/pkg/beam" + "github.com/dotcloud/docker/utils" + "os" + "os/exec" +) + +var initCalled bool + +// Init checks if the current process has been created by Spawn. +// +// If no, it returns nil and the original program can continue +// unmodified. +// +// If no, it hijacks the process to run as a child worker controlled +// by its parent over a beam connection, with f exposed as a remote +// service. In this case Init never returns. +// +// The hijacking process takes place as follows: +// - Open file descriptor 3 as a beam endpoint. If this fails, +// terminate the current process. +// - Start a new engine. +// - Call f.Install on the engine. Any handlers registered +// will be available for remote invocation by the parent. +// - Listen for beam messages from the parent and pass them to +// the handlers. +// - When the beam endpoint is closed by the parent, terminate +// the current process. +// +// NOTE: Init must be called at the beginning of the same program +// calling Spawn. 
This is because Spawn approximates a "fork" by +// re-executing the current binary - where it expects spawn.Init +// to intercept the control flow and execute the worker code. +func Init(f engine.Installer) error { + initCalled = true + if os.Getenv("ENGINESPAWN") != "1" { + return nil + } + fmt.Printf("[%d child]\n", os.Getpid()) + // Hijack the process + childErr := func() error { + fd3 := os.NewFile(3, "beam-introspect") + introsp, err := beam.FileConn(fd3) + if err != nil { + return fmt.Errorf("beam introspection error: %v", err) + } + fd3.Close() + defer introsp.Close() + eng := engine.NewReceiver(introsp) + if err := f.Install(eng.Engine); err != nil { + return err + } + if err := eng.Run(); err != nil { + return err + } + return nil + }() + if childErr != nil { + os.Exit(1) + } + os.Exit(0) + return nil // Never reached +} + +// Spawn starts a new Engine in a child process and returns +// a proxy Engine through which it can be controlled. +// +// The commands available on the child engine are determined +// by an earlier call to Init. It is important that Init be +// called at the very beginning of the current program - this +// allows it to be called as a re-execution hook in the child +// process. +// +// Long story short, if you want to expose `myservice` in a child +// process, do this: +// +// func main() { +// spawn.Init(myservice) +// [..] +// child, err := spawn.Spawn() +// [..] +// child.Job("dosomething").Run() +// } +func Spawn() (*engine.Engine, error) { + if !initCalled { + return nil, fmt.Errorf("spawn.Init must be called at the top of the main() function") + } + cmd := exec.Command(utils.SelfPath()) + cmd.Env = append(cmd.Env, "ENGINESPAWN=1") + local, remote, err := beam.SocketPair() + if err != nil { + return nil, err + } + child, err := beam.FileConn(local) + if err != nil { + local.Close() + remote.Close() + return nil, err + } + local.Close() + cmd.ExtraFiles = append(cmd.ExtraFiles, remote) + // FIXME: the beam/engine glue has no way to inform the caller + // of the child's termination. The next call will simply return + // an error. + if err := cmd.Start(); err != nil { + child.Close() + return nil, err + } + eng := engine.New() + if err := engine.NewSender(child).Install(eng); err != nil { + child.Close() + return nil, err + } + return eng, nil +} diff --git a/engine/spawn/subengine/main.go b/engine/spawn/subengine/main.go new file mode 100644 index 0000000000..3be7520a67 --- /dev/null +++ b/engine/spawn/subengine/main.go @@ -0,0 +1,61 @@ +package main + +import ( + "fmt" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/engine/spawn" + "log" + "os" + "os/exec" + "strings" +) + +func main() { + fmt.Printf("[%d] MAIN\n", os.Getpid()) + spawn.Init(&Worker{}) + fmt.Printf("[%d parent] spawning\n", os.Getpid()) + eng, err := spawn.Spawn() + if err != nil { + log.Fatal(err) + } + fmt.Printf("[parent] spawned\n") + job := eng.Job(os.Args[1], os.Args[2:]...) + job.Stdout.Add(os.Stdout) + job.Stderr.Add(os.Stderr) + job.Run() + // FIXME: use the job's status code + os.Exit(0) +} + +type Worker struct { +} + +func (w *Worker) Install(eng *engine.Engine) error { + eng.Register("exec", w.Exec) + eng.Register("cd", w.Cd) + eng.Register("echo", w.Echo) + return nil +} + +func (w *Worker) Exec(job *engine.Job) engine.Status { + fmt.Printf("--> %v\n", job.Args) + cmd := exec.Command(job.Args[0], job.Args[1:]...) 
+ cmd.Stdout = job.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return job.Errorf("%v\n", err) + } + return engine.StatusOK +} + +func (w *Worker) Cd(job *engine.Job) engine.Status { + if err := os.Chdir(job.Args[0]); err != nil { + return job.Errorf("%v\n", err) + } + return engine.StatusOK +} + +func (w *Worker) Echo(job *engine.Job) engine.Status { + fmt.Fprintf(job.Stdout, "%s\n", strings.Join(job.Args, " ")) + return engine.StatusOK +} From 9b23178f585f65b9dc9558694d6de4207c479801 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Sun, 27 Apr 2014 23:57:41 -0700 Subject: [PATCH 068/219] engine.Len returns the number of keys in an env Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- engine/env.go | 7 +++++++ engine/env_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/engine/env.go b/engine/env.go index c43a5ec971..f96795f48c 100644 --- a/engine/env.go +++ b/engine/env.go @@ -36,6 +36,13 @@ func (env *Env) Exists(key string) bool { return exists } +// Len returns the number of keys in the environment. +// Note that len(env) might be different from env.Len(), +// because the same key might be set multiple times. +func (env *Env) Len() int { + return len(env.Map()) +} + func (env *Env) Init(src *Env) { (*env) = make([]string, 0, len(*src)) for _, val := range *src { diff --git a/engine/env_test.go b/engine/env_test.go index c7079ff942..0c66cea04e 100644 --- a/engine/env_test.go +++ b/engine/env_test.go @@ -4,6 +4,34 @@ import ( "testing" ) +func TestEnvLenZero(t *testing.T) { + env := &Env{} + if env.Len() != 0 { + t.Fatalf("%d", env.Len()) + } +} + +func TestEnvLenNotZero(t *testing.T) { + env := &Env{} + env.Set("foo", "bar") + env.Set("ga", "bu") + if env.Len() != 2 { + t.Fatalf("%d", env.Len()) + } +} + +func TestEnvLenDup(t *testing.T) { + env := &Env{ + "foo=bar", + "foo=baz", + "a=b", + } + // len(env) != env.Len() + if env.Len() != 2 { + t.Fatalf("%d", env.Len()) + } +} + func TestNewJob(t *testing.T) { job := mkJob(t, "dummy", "--level=awesome") if job.Name != "dummy" { From b6699111dbf4417dc1871b8e9217e842bcd14295 Mon Sep 17 00:00:00 2001 From: "O.S.Tezer" Date: Mon, 28 Apr 2014 18:30:31 +0300 Subject: [PATCH 069/219] Docs/CSS fix: Aallow viewport expand to user's preference This commit removes the "max-width" property of DOM "containers"; Thus letting the viewport to expand to fill the available space. This commit aims to bring pleasure to Docker docs' readers' eyes, And to make them happy by letting them profit more from their large monitors. (or use more efficiently their smaller ones). Docker-DCO-1.1-Signed-off-by: O.S. Tezer (github: ostezer) --- docs/theme/mkdocs/css/base.css | 6 ------ 1 file changed, 6 deletions(-) diff --git a/docs/theme/mkdocs/css/base.css b/docs/theme/mkdocs/css/base.css index 863c9cdb0b..e5fac04cf4 100644 --- a/docs/theme/mkdocs/css/base.css +++ b/docs/theme/mkdocs/css/base.css @@ -622,12 +622,6 @@ ol.breadcrumb > li.edit-on-github span { .container { width: 100% !important; } -.container-standard-sized { - max-width: 1050px; -} -.container-better { - max-width: 1050px; -} @media (max-width: 900px) { From 442b70c65a48d4a0d0e543d95fb0d0085c97fd67 Mon Sep 17 00:00:00 2001 From: "O.S.Tezer" Date: Mon, 28 Apr 2014 18:36:40 +0300 Subject: [PATCH 070/219] Docs/theme/MAINTAINERS: Modify/Update list of MAINTAINERS So far it has been mostly my duty to create and maintain docs' design/theme. 
This commit adds myself to the list of maintainers by modifying the MAINTAINERS file under the docs/theme directory. Docker-DCO-1.1-Signed-off-by: O.S. Tezer (github: ostezer) --- docs/theme/MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/theme/MAINTAINERS b/docs/theme/MAINTAINERS index 93231b1223..081aa684d4 100644 --- a/docs/theme/MAINTAINERS +++ b/docs/theme/MAINTAINERS @@ -1 +1,2 @@ +O.S. Tezer (@OSTezer) Thatcher Peskens (@dhrp) From f334dbe82de06f9c71b850da4165076bb3cf8f4b Mon Sep 17 00:00:00 2001 From: "O.S.Tezer" Date: Mon, 28 Apr 2014 18:58:15 +0300 Subject: [PATCH 071/219] Docs/CSS: Amend code block rendering. This commit aims to improve the rendering of code blocks by reducing the padding, matching the font-size with the rest of the documentation text and finally, by changing the background colour back to white from its current gray-ish state which matches the background colour and making it really hard to spot the code. Note: The BG colouring issue is due to converting the main BG to gray whilst missing to change parts that where gray to white, i.e., not making the complete switch. Docker-DCO-1.1-Signed-off-by: O.S. Tezer (github: ostezer) --- docs/theme/mkdocs/css/base.css | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/theme/mkdocs/css/base.css b/docs/theme/mkdocs/css/base.css index 863c9cdb0b..7a61792fe9 100644 --- a/docs/theme/mkdocs/css/base.css +++ b/docs/theme/mkdocs/css/base.css @@ -55,10 +55,10 @@ h6, margin-bottom: 1.2em; } #content pre { - margin: 2em 0em; - padding: 1em 2em !important; + margin: 1em 0em; + padding: 0.5em 0.75em !important; line-height: 1.8em; - font-size: 1em; + background: #fff; } #content blockquote { background: #f2f2f2; From abf3baf4ebfb8128ed4090165c65de0e3f4be278 Mon Sep 17 00:00:00 2001 From: "O.S.Tezer" Date: Mon, 28 Apr 2014 19:02:50 +0300 Subject: [PATCH 072/219] Docs/CSS: Fix Notes/Warnings blocks' BG colour to improve its state. Docs' BG was original "#fff" and blocks such as code blocks or warning-notes blocks were coloured in #F2F2F2. In order to make it easier to read everything, the BG colour was changed to #fff. However, the switch missed to convert other blocks' BG colour. This commit aims to re-introduce the correct contrast by changing the BG colour of warning/notes block to #fff. Docker-DCO-1.1-Signed-off-by: O.S. Tezer (github: ostezer) --- docs/theme/mkdocs/css/base.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/theme/mkdocs/css/base.css b/docs/theme/mkdocs/css/base.css index 863c9cdb0b..95cc8876d3 100644 --- a/docs/theme/mkdocs/css/base.css +++ b/docs/theme/mkdocs/css/base.css @@ -61,7 +61,7 @@ h6, font-size: 1em; } #content blockquote { - background: #f2f2f2; + background: #fff; border-left-color: #ccc; } #content blockquote p { From bba1dd046dd39405877c9f0b53502e68dddf36b1 Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Mon, 28 Apr 2014 13:38:58 -0400 Subject: [PATCH 073/219] Docs fix: correct /commit info Correct documentation for POST /commit to reflect that the container's configuration is supplied in the request body, and not as a query parameter. Also correct a small typo in the example JSON for create container. 
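As a rough sketch of the corrected usage — assuming a daemon started with `docker -d -H tcp://127.0.0.1:4243`, and reusing the container ID and query parameters from the example request below — the container configuration now travels in the JSON request body rather than in a query parameter:

    curl -X POST \
         -H 'Content-Type: application/json' \
         -d '{"Cmd": ["date"], "ExposedPorts": {"22/tcp": {}}}' \
         'http://127.0.0.1:4243/commit?container=44c004db4b17&m=message&repo=myrepo'
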
Docker-DCO-1.1-Signed-off-by: Andy Goldstein (github: ncdc) --- AUTHORS | 1 + .../reference/api/docker_remote_api_v1.10.md | 37 +++++++++++++++++-- .../reference/api/docker_remote_api_v1.11.md | 35 +++++++++++++++++- .../reference/api/docker_remote_api_v1.9.md | 35 +++++++++++++++++- 4 files changed, 101 insertions(+), 7 deletions(-) diff --git a/AUTHORS b/AUTHORS index 6e34065266..03834dcf8c 100644 --- a/AUTHORS +++ b/AUTHORS @@ -20,6 +20,7 @@ Andrew Munsell Andrews Medina Andy Chambers andy diller +Andy Goldstein Andy Rothfusz Andy Smith Anthony Bishopric diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md index 749ff8e383..9a5414f516 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.md +++ b/docs/sources/reference/api/docker_remote_api_v1.10.md @@ -131,7 +131,6 @@ Create a container "WorkingDir":"", "DisableNetwork": false, "ExposedPorts":{ - "DisableNetwork": false, "22/tcp": {} } } @@ -1132,6 +1131,33 @@ Create a new image from a container's changes **Example request**: POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "DisableNetwork": false, + "ExposedPorts":{ + "22/tcp": {} + } + } **Example response**: @@ -1140,6 +1166,13 @@ Create a new image from a container's changes {"Id":"596069db4bf5"} + + Json Parameters: + + + + - **config** - the container's configuration + Query Parameters:   @@ -1150,8 +1183,6 @@ Create a new image from a container's changes - **m** – commit message - **author** – author (eg. "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") - - **run** – config automatically applied when the image is run. - (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) Status Codes: diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index baabade455..b3d75abf20 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -1135,6 +1135,33 @@ Create a new image from a container's changes **Example request**: POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "DisableNetwork": false, + "ExposedPorts":{ + "22/tcp": {} + } + } **Example response**: @@ -1143,6 +1170,12 @@ Create a new image from a container's changes {"Id":"596069db4bf5"} + Json Parameters: + + + + - **config** - the container's configuration + Query Parameters:   @@ -1153,8 +1186,6 @@ Create a new image from a container's changes - **m** – commit message - **author** – author (eg. "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") - - **run** – config automatically applied when the image is run. 
- (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) Status Codes: diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md index 1fe154a3da..c6c27f84ca 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.md +++ b/docs/sources/reference/api/docker_remote_api_v1.9.md @@ -1146,6 +1146,33 @@ Create a new image from a container's changes **Example request**: POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname":"", + "User":"", + "Memory":0, + "MemorySwap":0, + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "date" + ], + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "DisableNetwork": false, + "ExposedPorts":{ + "22/tcp": {} + } + } **Example response**: @@ -1154,6 +1181,12 @@ Create a new image from a container's changes {"Id":"596069db4bf5"} + Json Parameters: + + + + - **config** - the container's configuration + Query Parameters:   @@ -1164,8 +1197,6 @@ Create a new image from a container's changes - **m** – commit message - **author** – author (eg. "John Hannibal Smith <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") - - **run** – config automatically applied when the image is run. - (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) Status Codes: From 78421b376897c589a3c7a80a5b94fc3dd74e2741 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Tue, 8 Apr 2014 01:00:32 +0000 Subject: [PATCH 074/219] docker rmi -f works with stopped containers + revamped error messages Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- server/server.go | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/server/server.go b/server/server.go index 0ab0a4a00b..8426a9fc41 100644 --- a/server/server.go +++ b/server/server.go @@ -1952,6 +1952,7 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no var ( repoName, tag string tags = []string{} + tagDeleted bool ) repoName, tag = utils.ParseRepositoryTag(name) @@ -2002,7 +2003,7 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no //Untag the current image for _, tag := range tags { - tagDeleted, err := srv.daemon.Repositories().Delete(repoName, tag) + tagDeleted, err = srv.daemon.Repositories().Delete(repoName, tag) if err != nil { return err } @@ -2016,7 +2017,7 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, no tags = srv.daemon.Repositories().ByID()[img.ID] if (len(tags) <= 1 && repoName == "") || len(tags) == 0 { if len(byParents[img.ID]) == 0 { - if err := srv.canDeleteImage(img.ID); err != nil { + if err := srv.canDeleteImage(img.ID, force, tagDeleted); err != nil { return err } if err := srv.daemon.Repositories().DeleteAll(img.ID); err != nil { @@ -2059,7 +2060,11 @@ func (srv *Server) ImageDelete(job *engine.Job) engine.Status { return engine.StatusOK } -func (srv *Server) canDeleteImage(imgID string) error { +func (srv *Server) canDeleteImage(imgID string, force, untagged bool) error { + var message string + if untagged { + message = " (but image was untagged)" + } for _, container := range srv.daemon.List() { parent, err := srv.daemon.Repositories().LookupImage(container.Image) if err != nil { @@ -2068,7 +2073,14 @@ func (srv *Server) canDeleteImage(imgID string) error { if err := parent.WalkHistory(func(p *image.Image) error { if imgID == p.ID { 
- return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it", utils.TruncateID(imgID), utils.TruncateID(container.ID)) + if container.State.IsRunning() { + if force { + return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it%s, stop it and retry", utils.TruncateID(imgID), utils.TruncateID(container.ID), message) + } + return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it%s, stop it and use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID), message) + } else if !force { + return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it%s, use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID), message) + } } return nil }); err != nil { From 68d8d9a62d6199d0e285f8ec07b1a3cbdada8ac8 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 28 Apr 2014 19:03:31 +0000 Subject: [PATCH 075/219] update message Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- server/server.go | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/server/server.go b/server/server.go index 8426a9fc41..caed481a9f 100644 --- a/server/server.go +++ b/server/server.go @@ -24,19 +24,6 @@ package server import ( "encoding/json" "fmt" - "github.com/dotcloud/docker/api" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/daemon" - "github.com/dotcloud/docker/daemonconfig" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/graph" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/pkg/graphdb" - "github.com/dotcloud/docker/pkg/signal" - "github.com/dotcloud/docker/registry" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" "io" "io/ioutil" "log" @@ -53,6 +40,20 @@ import ( "sync" "syscall" "time" + + "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/daemon" + "github.com/dotcloud/docker/daemonconfig" + "github.com/dotcloud/docker/dockerversion" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/graph" + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/pkg/graphdb" + "github.com/dotcloud/docker/pkg/signal" + "github.com/dotcloud/docker/registry" + "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/utils" ) // jobInitApi runs the remote api server `srv` as a daemon, @@ -2063,7 +2064,7 @@ func (srv *Server) ImageDelete(job *engine.Job) engine.Status { func (srv *Server) canDeleteImage(imgID string, force, untagged bool) error { var message string if untagged { - message = " (but image was untagged)" + message = " (docker untagged the image)" } for _, container := range srv.daemon.List() { parent, err := srv.daemon.Repositories().LookupImage(container.Image) From a8871b93b95d1220c860987ae1da4e49c943bfea Mon Sep 17 00:00:00 2001 From: James Turnbull Date: Sat, 26 Apr 2014 08:00:01 -0400 Subject: [PATCH 076/219] Addressed regression of private repository documentation. This adds back in the references to private repositories and provides some refactoring to the Working with repositories documentation including updating references to the "Central" registry to Docker.io. It also: * Fixes some links and references to Central Index * Fixes anchors in other files to updated titles in Working with Repositories. * Renamed Central Index in the remaining places. 
* Updated terms documentation to reflect Docker.io * Updated some Docker Index naming to be consistent. * Updates menu labels and hyperlinks. Docker-DCO-1.1-Signed-off-by: James Turnbull (github: jamtur01) Docker-DCO-1.1-Signed-off-by: O.S. Tezer (github: ostezer) --- docs/mkdocs.yml | 13 +- docs/sources/docker-io/accounts.md | 32 +++++ docs/sources/{index => docker-io}/builds.md | 28 ++--- docs/sources/docker-io/home.md | 13 ++ docs/sources/{index => docker-io}/index.md | 0 docs/sources/{index => docker-io}/repos.md | 27 ++-- docs/sources/examples/hello_world.md | 9 +- docs/sources/index/accounts.md | 31 ----- docs/sources/index/home.md | 13 -- docs/sources/introduction/technology.md | 40 +++--- .../introduction/understanding-docker.md | 8 +- docs/sources/reference/api.md | 2 +- .../api/archive/docker_remote_api_v1.0.md | 2 +- .../api/archive/docker_remote_api_v1.1.md | 2 +- .../api/archive/docker_remote_api_v1.2.md | 2 +- .../api/archive/docker_remote_api_v1.3.md | 2 +- .../api/archive/docker_remote_api_v1.4.md | 2 +- .../api/archive/docker_remote_api_v1.5.md | 2 +- .../api/archive/docker_remote_api_v1.6.md | 2 +- .../api/archive/docker_remote_api_v1.7.md | 2 +- .../api/archive/docker_remote_api_v1.8.md | 2 +- .../api/{index_api.md => docker-io_api.md} | 10 +- .../reference/api/docker_remote_api_v1.10.md | 2 +- .../reference/api/docker_remote_api_v1.11.md | 2 +- .../reference/api/docker_remote_api_v1.9.md | 2 +- docs/sources/reference/commandline/cli.md | 12 +- docs/sources/terms/registry.md | 4 +- docs/sources/terms/repository.md | 2 +- docs/sources/use/basics.md | 6 +- docs/sources/use/workingwithrepository.md | 117 ++++++++++-------- 30 files changed, 205 insertions(+), 186 deletions(-) create mode 100644 docs/sources/docker-io/accounts.md rename docs/sources/{index => docker-io}/builds.md (84%) create mode 100644 docs/sources/docker-io/home.md rename docs/sources/{index => docker-io}/index.md (100%) rename docs/sources/{index => docker-io}/repos.md (75%) delete mode 100644 docs/sources/index/accounts.md delete mode 100644 docs/sources/index/home.md rename docs/sources/reference/api/{index_api.md => docker-io_api.md} (98%) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 0b526e016b..7971c56d9e 100755 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -82,12 +82,12 @@ pages: # - ['user-guide/configuration.md', 'User Guide', 'Configuration'] # ./faq.md -# Docker Index docs: -- ['index/index.md', '**HIDDEN**'] +# Docker.io docs: +- ['docker-io/index.md', '**HIDDEN**'] # - ['index/home.md', 'Docker Index', 'Help'] -- ['index/accounts.md', 'Docker Index', 'Accounts'] -- ['index/repos.md', 'Docker Index', 'Repositories'] -- ['index/builds.md', 'Docker Index', 'Trusted Builds'] +- ['docker-io/accounts.md', 'Docker.io', 'Accounts'] +- ['docker-io/repos.md', 'Docker.io', 'Repositories'] +- ['docker-io/builds.md', 'Docker.io', 'Trusted Builds'] # Reference - ['reference/index.md', '**HIDDEN**'] @@ -99,7 +99,7 @@ pages: - ['articles/security.md', 'Reference', 'Security'] - ['articles/baseimages.md', 'Reference', 'Creating a Base Image'] - ['use/networking.md', 'Reference', 'Advanced networking'] -- ['reference/api/index_api.md', 'Reference', 'Docker Index API'] +- ['reference/api/docker-io_api.md', 'Reference', 'Docker.io API'] - ['reference/api/registry_api.md', 'Reference', 'Docker Registry API'] - ['reference/api/registry_index_spec.md', 'Reference', 'Registry & Index Spec'] - ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API'] @@ -126,4 +126,3 @@ pages: - 
['terms/repository.md', '**HIDDEN**', 'repository'] - ['terms/filesystem.md', '**HIDDEN**', 'filesystem'] - ['terms/image.md', '**HIDDEN**', 'image'] - diff --git a/docs/sources/docker-io/accounts.md b/docs/sources/docker-io/accounts.md new file mode 100644 index 0000000000..cfbcd9512c --- /dev/null +++ b/docs/sources/docker-io/accounts.md @@ -0,0 +1,32 @@ +page_title: Accounts on Docker.io +page_description: Docker.io accounts +page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker.io, docs, documentation + +# Accounts on Docker.io + +## Docker.io Accounts + +You can `search` for Docker images and `pull` them from [Docker.io](https://index.docker.io) +without signing in or even having an account. However, in order to `push` images, +leave comments or to *star* a repository, you are going to need a [Docker.io]( +https://www.docker.io) account. + +### Registration for a Docker.io Account + +You can get a [Docker.io](https://index.docker.io) account by +[signing up for one here](https://www.docker.io/account/signup/). A valid +email address is required to register, which you will need to verify for +account activation. + +### Email activation process + +You need to have at least one verified email address to be able to use your +[Docker.io](https://index.docker.io) account. If you can't find the validation email, +you can request another by visiting the [Resend Email Confirmation]( +https://www.docker.io/account/resend-email-confirmation/) page. + +### Password reset process + +If you can't access your account for some reason, you can reset your password +from the [*Password Reset*](https://www.docker.io/account/forgot-password/) +page. \ No newline at end of file diff --git a/docs/sources/index/builds.md b/docs/sources/docker-io/builds.md similarity index 84% rename from docs/sources/index/builds.md rename to docs/sources/docker-io/builds.md index 899971c201..0ca058663a 100644 --- a/docs/sources/index/builds.md +++ b/docs/sources/docker-io/builds.md @@ -1,15 +1,15 @@ -page_title: Trusted Builds in the Docker Index -page_description: Docker Index Trusted Builds -page_keywords: Docker, docker, index, accounts, plans, Dockerfile, Docker.io, docs, documentation, trusted, builds, trusted builds +page_title: Trusted Builds on Docker.io +page_description: Docker.io Trusted Builds +page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker.io, docs, documentation, trusted, builds, trusted builds -# Trusted Builds in the Docker Index +# Trusted Builds on Docker.io ## Trusted Builds *Trusted Builds* is a special feature allowing you to specify a source repository with a *Dockerfile* to be built by the Docker build clusters. The system will clone your repository and build the Dockerfile using the repository -as the context. The resulting image will then be uploaded to the index and +as the context. The resulting image will then be uploaded to the registry and marked as a `Trusted Build`. Trusted Builds have a number of advantages. For example, users of *your* Trusted @@ -17,18 +17,18 @@ Build can be certain that the resulting image was built exactly how it claims to be. Furthermore, the Dockerfile will be available to anyone browsing your repository -on the Index. Another advantage of the Trusted Builds feature is the automated +on the registry. Another advantage of the Trusted Builds feature is the automated builds. This makes sure that your repository is always up to date. 
### Linking with a GitHub account -In order to setup a Trusted Build, you need to first link your Docker Index -account with a GitHub one. This will allow the Docker Index to see your -repositories. +In order to setup a Trusted Build, you need to first link your [Docker.io]( +https://index.docker.io) account with a GitHub one. This will allow the registry +to see your repositories. -> *Note:* We currently request access for *read* and *write* since the Index -> needs to setup a GitHub service hook. Although nothing else is done with -> your account, this is how GitHub manages permissions, sorry! +> *Note:* We currently request access for *read* and *write* since [Docker.io]( +> https://index.docker.io) needs to setup a GitHub service hook. Although nothing +> else is done with your account, this is how GitHub manages permissions, sorry! ### Creating a Trusted Build @@ -77,8 +77,8 @@ Trusted Build: ### The Dockerfile and Trusted Builds During the build process, we copy the contents of your Dockerfile. We also -add it to the Docker Index for the Docker community to see on the repository -page. +add it to the [Docker.io](https://index.docker.io) for the Docker community +to see on the repository page. ### README.md diff --git a/docs/sources/docker-io/home.md b/docs/sources/docker-io/home.md new file mode 100644 index 0000000000..d29de76fbf --- /dev/null +++ b/docs/sources/docker-io/home.md @@ -0,0 +1,13 @@ +page_title: The Docker.io Registry Help +page_description: The Docker Registry help documentation home +page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker.io, docs, documentation + +# The Docker.io Registry Help + +## Introduction + +For your questions about the [Docker.io](https://index.docker.io) registry you +can use [this documentation](docs.md). + +If you can not find something you are looking for, please feel free to +[contact us](https://index.docker.io/help/support/). \ No newline at end of file diff --git a/docs/sources/index/index.md b/docs/sources/docker-io/index.md similarity index 100% rename from docs/sources/index/index.md rename to docs/sources/docker-io/index.md diff --git a/docs/sources/index/repos.md b/docs/sources/docker-io/repos.md similarity index 75% rename from docs/sources/index/repos.md rename to docs/sources/docker-io/repos.md index 40b270a0b8..a9bdabd89b 100644 --- a/docs/sources/index/repos.md +++ b/docs/sources/docker-io/repos.md @@ -1,15 +1,16 @@ -page_title: Repositories and Images in the Docker Index -page_description: Docker Index repositories -page_keywords: Docker, docker, index, accounts, plans, Dockerfile, Docker.io, docs, documentation +page_title: Repositories and Images on Docker.io +page_description: Repositories and Images on Docker.io +page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker.io, docs, documentation -# Repositories and Images in the Docker Index +# Repositories and Images on Docker.io ## Searching for repositories and images You can `search` for all the publicly available repositories and images using Docker. If a repository is not public (i.e., private), it won't be listed on -the Index search results. To see repository statuses, you can look at your -[profile page](https://index.docker.io/account/). +the repository search results. To see repository statuses, you can look at your +[profile page](https://index.docker.io/account/) on [Docker.io]( +https://index.docker.io). ## Repositories @@ -22,20 +23,20 @@ of bookmark your favorites. 
You can interact with other members of the Docker community and maintainers by leaving comments on repositories. If you find any comments that are not -appropriate, you can flag them for the Index admins' review. +appropriate, you can flag them for the admins' review. ### Private Docker Repositories -To work with a private repository on the Docker Index, you will need to add one -via the [Add Repository](https://index.docker.io/account/repositories/add) link. -Once the private repository is created, you can `push` and `pull` images to and -from it using Docker. +To work with a private repository on [Docker.io](https://index.docker.io), you +will need to add one via the [Add Repository](https://index.docker.io/account/repositories/add) +link. Once the private repository is created, you can `push` and `pull` images +to and from it using Docker. > *Note:* You need to be signed in and have access to work with a private > repository. Private repositories are just like public ones. However, it isn't possible to -browse them or search their content on the public index. They do not get cached +browse them or search their content on the public registry. They do not get cached the same way as a public repository either. It is possible to give access to a private repository to those whom you @@ -44,7 +45,7 @@ designate (i.e., collaborators) from its settings page. From there, you can also switch repository status (*public* to *private*, or viceversa). You will need to have an available private repository slot open before you can do such a switch. If you don't have any, you can always upgrade -your [Docker Index plan](https://index.docker.io/plans/). +your [Docker.io](https://index.docker.io/plans/) plan. ### Collaborators and their role diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md index c7e821136c..ba573527c1 100644 --- a/docs/sources/examples/hello_world.md +++ b/docs/sources/examples/hello_world.md @@ -35,11 +35,10 @@ Download the small base image named `busybox`: # Download a busybox image $ sudo docker pull busybox -The `busybox` image is a minimal Linux system. You -can do the same with any number of other images, such as -`debian`, `ubuntu` or -`centos`. The images can be found and retrieved -using the [Docker index](http://index.docker.io). +The `busybox` image is a minimal Linux system. You can do the same with +any number of other images, such as `debian`, `ubuntu` or `centos`. The +images can be found and retrieved using the +[Docker.io](http://index.docker.io) registry. $ sudo docker run busybox /bin/echo hello world diff --git a/docs/sources/index/accounts.md b/docs/sources/index/accounts.md deleted file mode 100644 index 54d015ac2a..0000000000 --- a/docs/sources/index/accounts.md +++ /dev/null @@ -1,31 +0,0 @@ -page_title: Accounts in the Docker Index -page_description: Docker Index accounts -page_keywords: Docker, docker, index, accounts, plans, Dockerfile, Docker.io, docs, documentation - -# Accounts in the Docker Index - -## Docker IO and Docker Index Accounts - -You can `search` for Docker images and `pull` them from the [Docker Index]( -https://index.docker.io) without signing in or even having an account. However, -in order to `push` images, leave comments or to *star* a repository, you are going -to need a [Docker IO](https://www.docker.io) account. - -### Registration for a Docker IO Account - -You can get a Docker IO account by [signing up for one here]( -https://www.docker.io/account/signup/). 
A valid email address is required to -register, which you will need to verify for account activation. - -### Email activation process - -You need to have at least one verified email address to be able to use your -Docker IO account. If you can't find the validation email, you can request -another by visiting the [Resend Email Confirmation]( -https://www.docker.io/account/resend-email-confirmation/) page. - -### Password reset process - -If you can't access your account for some reason, you can reset your password -from the [*Password Reset*](https://www.docker.io/account/forgot-password/) -page. \ No newline at end of file diff --git a/docs/sources/index/home.md b/docs/sources/index/home.md deleted file mode 100644 index 1b03df4ab7..0000000000 --- a/docs/sources/index/home.md +++ /dev/null @@ -1,13 +0,0 @@ -page_title: The Docker Index Help -page_description: The Docker Index help documentation home -page_keywords: Docker, docker, index, accounts, plans, Dockerfile, Docker.io, docs, documentation - -# The Docker Index Help - -## Introduction - -For your questions about the [Docker Index](https://index.docker.io) you can -use [this documentation](docs.md). - -If you can not find something you are looking for, please feel free to -[contact us](https://index.docker.io/help/support/). \ No newline at end of file diff --git a/docs/sources/introduction/technology.md b/docs/sources/introduction/technology.md index 346a118c39..a724e4aae6 100644 --- a/docs/sources/introduction/technology.md +++ b/docs/sources/introduction/technology.md @@ -43,7 +43,7 @@ Docker's main components are: - Docker *daemon*; - Docker *client*, and; - - The Docker Index. + - [Docker.io](https://index.docker.io) registry. ### The Docker daemon @@ -57,9 +57,9 @@ The Docker client is the primary user interface to Docker. It is tasked with accepting commands from the user and communicating back and forth with a Docker daemon to manage the container lifecycle on any host. -### Docker Index, the central Docker registry +### Docker.io registry -The [Docker Index](http://index.docker.io) is the global archive (and +[Docker.io](https://index.docker.io) is the global archive (and directory) of user supplied Docker container images. It currently hosts a large – in fact, rapidly growing – number of projects where you can find almost any popular application or deployment stack readily @@ -70,28 +70,29 @@ tools for everyone to grow with other *Dockers*. By issuing a single command through the Docker client you can start sharing your own creations with the rest of the world. -However, knowing that not everything can be shared the Docker Index also -offers private repositories. In order to see the available plans, you -can click [here](https://index.docker.io/plans). +However, knowing that not everything can be shared the [Docker.io]( +https://index.docker.io) also offers private repositories. In order to see +the available plans, you can click [here](https://index.docker.io/plans). -Using the [Docker Registry](https://github.com/dotcloud/docker-registry), it is +Using [*docker-registry*](https://github.com/dotcloud/docker-registry), it is also possible to run your own private Docker image registry service on your own servers. 
-> **Note:** To learn more about the [*Docker Image Index*]( -> http://index.docker.io) (public *and* private), check out the [Registry & +> **Note:** To learn more about the [*Docker.io*](http://index.docker.io) +> registry (for public *and* private repositories), check out the [Registry & > Index Spec](http://docs.docker.io/api/registry_index_spec/). ### Summary - **When you install Docker, you get all the components:** - The daemon, the client and access to the public image registry: the [Docker Index](http://index.docker.io). + The daemon, the client and access to the [Docker.io](http://index.docker.io) registry. - **You can run these components together or distributed:** Servers with the Docker daemon running, controlled by the Docker client. - **You can benefit form the public registry:** Download and build upon images created by the community. - **You can start a private repository for proprietary use.** - Sign up for a [plan](https://index.docker.io/plans) or host your own [Docker registry](https://github.com/dotcloud/docker-registry). + Sign up for a [plan](https://index.docker.io/plans) or host your own [docker-registry]( +https://github.com/dotcloud/docker-registry). ## Elements of Docker @@ -198,7 +199,7 @@ Docker begins with: - **Pulling the `ubuntu` image:** Docker checks for the presence of the `ubuntu` image and if it doesn't - exist locally on the host, then Docker downloads it from the [Docker Index](https://index.docker.io) + exist locally on the host, then Docker downloads it from [Docker.io](https://index.docker.io) - **Creates a new container:** Once Docker has the image it creates a container from it. - **Allocates a filesystem and mounts a read-write _layer_:** @@ -226,28 +227,27 @@ UnionFS technology we saw earlier. Every image starts from a base image, for example `ubuntu` a base Ubuntu image or `fedora` a base Fedora image. Docker builds and provides these -base images via the [Docker Index](http://index.docker.io). +base images via [Docker.io](http://index.docker.io). ### How does a Docker registry work? The Docker registry is a store for your Docker images. Once you build a -Docker image you can *push* it to the [Docker -Index](http://index.docker.io) or to a private registry you run behind -your firewall. +Docker image you can *push* it to a public or private repository on [Docker.io]( +http://index.docker.io) or to your own registry running behind your firewall. Using the Docker client, you can search for already published images and then pull them down to your Docker host to build containers from them (or even build on these images). -The [Docker Index](http://index.docker.io) provides both public and +[Docker.io](http://index.docker.io) provides both public and private storage for images. Public storage is searchable and can be downloaded by anyone. Private repositories are excluded from search results and only you and your users can pull them down and use them to build containers. You can [sign up for a plan here](https://index.docker.io/plans). -To learn more, check out the [Working With Repositories]( -http://docs.docker.io/use/workingwithrepository) section of our -[User's Manual](http://docs.docker.io). +To learn more, check out the [Working with Repositories]( +http://docs.docker.io/use/workingwithrepository) section from the +[Docker documentation](http://docs.docker.io). 
## Where to go from here diff --git a/docs/sources/introduction/understanding-docker.md b/docs/sources/introduction/understanding-docker.md index 5920da5bca..53f5e43179 100644 --- a/docs/sources/introduction/understanding-docker.md +++ b/docs/sources/introduction/understanding-docker.md @@ -179,10 +179,10 @@ Without dealing with complicated commands or third party applications. Docker allows you to share the images you've built with the world. And lots of people have already shared their own images. -To facilitate this sharing Docker comes with a public registry and index -called the [Docker Index](http://index.docker.io). If you don't want -your images to be public you can also use private images on the Index or -even run your own registry behind your firewall. +To facilitate this sharing Docker comes with a public registry called +[Docker.io](http://index.docker.io). If you don't want your images to be +public you can also use private images on [Docker.io](https://index.docker.io) +or even run your own registry behind your firewall. **This translates to:** diff --git a/docs/sources/reference/api.md b/docs/sources/reference/api.md index 254db25e92..9f185a0e37 100644 --- a/docs/sources/reference/api.md +++ b/docs/sources/reference/api.md @@ -42,7 +42,7 @@ interfaces: - [3 Authorization](registry_api/#authorization) - - [Docker Index API](index_api/) + - [Docker.io API](index_api/) - [1. Brief introduction](index_api/#brief-introduction) - [2. Endpoints](index_api/#endpoints) - [2.1 Repository](index_api/#repository) diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.0.md b/docs/sources/reference/api/archive/docker_remote_api_v1.0.md index dffee87dca..d719ca27e8 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.0.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.0.md @@ -734,7 +734,7 @@ Remove the image `name` from the filesystem `GET /images/search` -Search for an image in the docker index +Search for an image on [Docker.io](https://index.docker.io) **Example request**: diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.1.md b/docs/sources/reference/api/archive/docker_remote_api_v1.1.md index 32220e79cf..21997e5488 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.1.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.1.md @@ -745,7 +745,7 @@ Remove the image `name` from the filesystem `GET /images/search` -Search for an image in the docker index +Search for an image on [Docker.io](https://index.docker.io) **Example request**: diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.2.md b/docs/sources/reference/api/archive/docker_remote_api_v1.2.md index 19703a0028..17967eab3d 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.2.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.2.md @@ -772,7 +772,7 @@ Remove the image `name` from the filesystem `GET /images/search` -Search for an image in the docker index +Search for an image on [Docker.io](https://index.docker.io) **Example request**: diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.3.md b/docs/sources/reference/api/archive/docker_remote_api_v1.3.md index 510719ee00..9f7bd22e32 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.3.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.3.md @@ -821,7 +821,7 @@ Remove the image `name` from the filesystem `GET /images/search` -Search for an image in the docker index +Search for an image on 
[Docker.io](https://index.docker.io) **Example request**: diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.4.md b/docs/sources/reference/api/archive/docker_remote_api_v1.4.md index a7d52de871..f31f87e55f 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.4.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.4.md @@ -866,7 +866,7 @@ Remove the image `name` from the filesystem `GET /images/search` -Search for an image in the docker index +Search for an image on [Docker.io](https://index.docker.io) **Example request**: diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.5.md b/docs/sources/reference/api/archive/docker_remote_api_v1.5.md index c9fd854f44..1d0b7e203f 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.5.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.5.md @@ -871,7 +871,7 @@ Remove the image `name` from the filesystem `GET /images/search` -Search for an image in the docker index +Search for an image on [Docker.io](https://index.docker.io) **Example request**: diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.6.md b/docs/sources/reference/api/archive/docker_remote_api_v1.6.md index 2ec7336a75..ebf2843e93 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.6.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.6.md @@ -976,7 +976,7 @@ Remove the image `name` from the filesystem `GET /images/search` -Search for an image in the docker index +Search for an image on [Docker.io](https://index.docker.io) **Example request**: diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.7.md b/docs/sources/reference/api/archive/docker_remote_api_v1.7.md index cf748a7f9b..0f18e09d0a 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.7.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.7.md @@ -902,7 +902,7 @@ Remove the image `name` from the filesystem `GET /images/search` -Search for an image in the docker index. +Search for an image on [Docker.io](https://index.docker.io). > **Note**: > The response keys have changed from API v1.6 to reflect the JSON diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.8.md b/docs/sources/reference/api/archive/docker_remote_api_v1.8.md index 8520e9f1e5..53a8e4d7e1 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.8.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.8.md @@ -944,7 +944,7 @@ Remove the image `name` from the filesystem `GET /images/search` -Search for an image in the docker index. +Search for an image on [Docker.io](https://index.docker.io). 
> **Note**: > The response keys have changed from API v1.6 to reflect the JSON diff --git a/docs/sources/reference/api/index_api.md b/docs/sources/reference/api/docker-io_api.md similarity index 98% rename from docs/sources/reference/api/index_api.md rename to docs/sources/reference/api/docker-io_api.md index 161b3e0c71..66cf311b41 100644 --- a/docs/sources/reference/api/index_api.md +++ b/docs/sources/reference/api/docker-io_api.md @@ -1,12 +1,12 @@ -page_title: Index API -page_description: API Documentation for Docker Index -page_keywords: API, Docker, index, REST, documentation +page_title: Docker.io API +page_description: API Documentation for the Docker.io API +page_keywords: API, Docker, index, REST, documentation, Docker.io, registry -# Docker Index API +# Docker.io API ## Introduction -- This is the REST API for the Docker index +- This is the REST API for [Docker.io](http://index.docker.io). - Authorization is done with basic auth over SSL - Not all commands require authentication, only those noted as such. diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md index 749ff8e383..0036e249cc 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.md +++ b/docs/sources/reference/api/docker_remote_api_v1.10.md @@ -934,7 +934,7 @@ Tag the image `name` into a repository `GET /images/search` -Search for an image in the docker index. +Search for an image on [Docker.io](https://index.docker.io). > **Note**: > The response keys have changed from API v1.6 to reflect the JSON diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index baabade455..64d728300c 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -937,7 +937,7 @@ Remove the image `name` from the filesystem `GET /images/search` -Search for an image in the docker index. +Search for an image on [Docker.io](https://index.docker.io). > **Note**: > The response keys have changed from API v1.6 to reflect the JSON diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md index 1fe154a3da..44238fe07a 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.md +++ b/docs/sources/reference/api/docker_remote_api_v1.9.md @@ -947,7 +947,7 @@ Tag the image `name` into a repository `GET /images/search` -Search for an image in the docker index. +Search for an image on [Docker.io](https://index.docker.io). > **Note**: > The response keys have changed from API v1.6 to reflect the JSON diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index c9bf5753ca..ac589c01b2 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -694,10 +694,10 @@ Pull an image or a repository from the registry Usage: docker pull NAME[:TAG] Most of your images will be created on top of a base image from the -Docker Index ([https://index.docker.io](https://index.docker.io)). +[Docker.io](https://index.docker.io) registry. -The Docker Index contains many pre-built images that you can -`pull` and try without needing to define and configure your own. +[Docker.io](https://index.docker.io) contains many pre-built images that you +can `pull` and try without needing to define and configure your own. 
To download a particular image, or set of images (i.e., a repository), use `docker pull`: @@ -1067,7 +1067,7 @@ It is used to create a backup that can then be used with ## search -Search the docker index for images +Search [Docker.io](https://index.docker.io) for images Usage: docker search TERM @@ -1075,8 +1075,8 @@ Search the docker index for images -s, --stars=0: Only displays with at least xxx stars -t, --trusted=false: Only show trusted builds -See [*Find Public Images on the Central Index*]( -/use/workingwithrepository/#searching-central-index) for +See [*Find Public Images on Docker.io*]( +/use/workingwithrepository/#find-public-images-on-dockerio) for more details on finding shared images from the commandline. ## start diff --git a/docs/sources/terms/registry.md b/docs/sources/terms/registry.md index bb3209ebac..2006710607 100644 --- a/docs/sources/terms/registry.md +++ b/docs/sources/terms/registry.md @@ -1,6 +1,6 @@ page_title: Registry page_description: Definition of an Registry -page_keywords: containers, lxc, concepts, explanation, image, repository, container +page_keywords: containers, concepts, explanation, image, repository, container # Registry @@ -11,7 +11,7 @@ A Registry is a hosted service containing [*repositories*]( responds to the Registry API. The default registry can be accessed using a browser at -[http://images.docker.io](http://images.docker.io) or using the +[Docker.io](http://index.docker.io) or using the `sudo docker search` command. ## Further Reading diff --git a/docs/sources/terms/repository.md b/docs/sources/terms/repository.md index 52760ac20d..1e035c95f4 100644 --- a/docs/sources/terms/repository.md +++ b/docs/sources/terms/repository.md @@ -1,6 +1,6 @@ page_title: Repository page_description: Definition of an Repository -page_keywords: containers, lxc, concepts, explanation, image, repository, container +page_keywords: containers, concepts, explanation, image, repository, container # Repository diff --git a/docs/sources/use/basics.md b/docs/sources/use/basics.md index bbe967cc7c..b9d52877e4 100644 --- a/docs/sources/use/basics.md +++ b/docs/sources/use/basics.md @@ -25,9 +25,9 @@ for installation instructions. # Download an ubuntu image sudo docker pull ubuntu -This will find the `ubuntu` image by name in the -[*Central Index*](../workingwithrepository/#searching-central-index) and -download it from the top-level Central Repository to a local image +This will find the `ubuntu` image by name on +[*Docker.io*](../workingwithrepository/#find-public-images-on-dockerio) and +download it from [Docker.io](https://index.docker.io) to a local image cache. > **Note**: diff --git a/docs/sources/use/workingwithrepository.md b/docs/sources/use/workingwithrepository.md index 2ffca34ce5..e3daf05fc7 100644 --- a/docs/sources/use/workingwithrepository.md +++ b/docs/sources/use/workingwithrepository.md @@ -6,33 +6,35 @@ page_keywords: repo, repositories, usage, pull image, push image, image, documen ## Introduction -A *repository* is a shareable collection of tagged +Docker is not only a tool for creating and managing your own +[*containers*](/terms/container/#container-def) – **Docker is also a +tool for sharing**. A *repository* is a shareable collection of tagged [*images*](/terms/image/#image-def) that together create the file systems for containers. The repository's name is a label that indicates the provenance of the repository, i.e. who created it and where the original copy is located. -You can find one or more repositories hosted on a *registry*. 
There can -be an implicit or explicit host name as part of the repository tag. The -implicit registry is located at `index.docker.io`, -the home of "top-level" repositories and the Central Index. This -registry may also include public "user" repositories. +You can find one or more repositories hosted on a *registry*. There are +two types of *registry*: public and private. There's also a default +*registry* that Docker uses which is called +[Docker.io](http://index.docker.io). +[Docker.io](http://index.docker.io) is the home of +"top-level" repositories and public "user" repositories. The Docker +project provides [Docker.io](http://index.docker.io) to host public and +[private repositories](https://index.docker.io/plans/), namespaced by +user. We provide user authentication and search over all the public +repositories. -Docker is not only a tool for creating and managing your own -[*containers*](/terms/container/#container-def) – **Docker is also -a tool for sharing**. The Docker project provides a Central Registry to -host public repositories, namespaced by user, and a Central Index which -provides user authentication and search over all the public -repositories. You can host your own Registry too! Docker acts as a -client for these services via `docker search, pull, login` -and `push`. +Docker acts as a client for these services via the `docker search, pull, +login` and `push` commands. ## Repositories ### Local Repositories Docker images which have been created and labeled on your local Docker -server need to be pushed to a Public or Private registry to be shared. +server need to be pushed to a Public (by default they are pushed to +[Docker.io](http://index.docker.io)) or Private registry to be shared. ### Public Repositories @@ -41,22 +43,29 @@ which are controlled by the Docker team, and *user* repositories created by individual contributors. Anyone can read from these repositories – they really help people get started quickly! You could also use [*Trusted Builds*](#trusted-builds) if you need to keep -control of who accesses your images, but we will only refer to public -repositories in these examples. +control of who accesses your images. - Top-level repositories can easily be recognized by **not** having a - `/` (slash) in their name. These repositories can generally be trusted. + `/` (slash) in their name. These repositories represent trusted images + provided by the Docker team. - User repositories always come in the form of `/`. - This is what your published images will look like if you push to the public - Central Registry. -- Only the authenticated user can push to their *username* namespace - on the Central Registry. -- User images are not checked, it is therefore up to you whether or not you - trust the creator of this image. + This is what your published images will look like if you push to the + public [Docker.io](http://index.docker.io) registry. +- Only the authenticated user can push to their *username* namespace on + a [Docker.io](http://index.docker.io) repository. +- User images are not curated, it is therefore up to you whether or not + you trust the creator of this image. -## Find Public Images on the Central Index +### Private repositories -You can search the Central Index [online](https://index.docker.io) or +You can also create private repositories on +[Docker.io](https://index.docker.io/plans/). These allow you to store +images that you don't want to share publicly. Only authenticated users +can push to private repositories. 
+ +## Find Public Images on Docker.io + +You can search the [Docker.io](https://index.docker.io) registry or using the command line interface. Searching can find images by name, user name or description: @@ -78,7 +87,7 @@ There you can see two example results: `centos` and shows that it comes from the public repository of a user, `slantview/`, while the first result (`centos`) doesn't explicitly list a repository so -it comes from the trusted Central Repository. The `/` +it comes from the trusted top-level namespace. The `/` character separates a user's repository and the image name. Once you have found the image name, you can download it: @@ -92,13 +101,13 @@ What can you do with that image? Check out the [*Examples*](/examples/#example-list) and, when you're ready with your own image, come back here to learn how to share it. -## Contributing to the Central Registry +## Contributing to Docker.io -Anyone can pull public images from the Central Registry, but if you -would like to share one of your own images, then you must register a -unique user name first. You can create your username and login on the -[central Docker Index online](https://index.docker.io/account/signup/), -or by running +Anyone can pull public images from the +[Docker.io](http://index.docker.io) registry, but if you would like to +share one of your own images, then you must register a unique user name +first. You can create your username and login on +[Docker.io](https://index.docker.io/account/signup/), or by running sudo docker login @@ -110,15 +119,19 @@ also prompt you to enter a password and your e-mail address. It will then automatically log you in. Now you're ready to commit and push your own images! +> **Note:** +> Your authentication credentials will be stored in the [`.dockercfg` +> authentication file](#authentication-file). + ## Committing a Container to a Named Image When you make changes to an existing image, those changes get saved to a container's file system. You can then promote that container to become -an image by making a `commit`. In addition to -converting the container to an image, this is also your opportunity to -name the image, specifically a name that includes your user name from -the Central Docker Index (as you did a `login` -above) and a meaningful name for the image. +an image by making a `commit`. In addition to converting the container +to an image, this is also your opportunity to name the image, +specifically a name that includes your user name from +[Docker.io](http://index.docker.io) (as you did a `login` above) and a +meaningful name for the image. # format is "sudo docker commit /" $ sudo docker commit $CONTAINER_ID myname/kickassapp @@ -143,7 +156,7 @@ when you push a commit. ### To setup a trusted build -1. Create a [Docker Index account](https://index.docker.io/) and login. +1. Create a [Docker.io account](https://index.docker.io/) and login. 2. Link your GitHub account through the `Link Accounts` menu. 3. [Configure a Trusted build](https://index.docker.io/builds/). 4. Pick a GitHub project that has a `Dockerfile` that you want to build. @@ -154,8 +167,9 @@ when you push a commit. Once the Trusted Build is configured it will automatically trigger a build, and in a few minutes, if there are no errors, you will see your -new trusted build on the Docker Index. It will will stay in sync with -your GitHub repo until you deactivate the Trusted Build. +new trusted build on the [Docker.io](https://index.docker.io) Registry. 
+It will will stay in sync with your GitHub repo until you deactivate the +Trusted Build. If you want to see the status of your Trusted Builds you can go to your [Trusted Builds page](https://index.docker.io/builds/) on the Docker @@ -167,15 +181,20 @@ cannot however push to a Trusted Build with the `docker push` command. You can only manage it by committing code to your GitHub repository. You can create multiple Trusted Builds per repository and configure them -to point to specific Dockerfile's or Git branches. +to point to specific `Dockerfile`'s or Git branches. ## Private Registry -Private registries and private shared repositories are only possible by -hosting [your own registry](https://github.com/dotcloud/docker-registry). -To push or pull to a repository on your own registry, you must prefix the -tag with the address of the registry's host (a `.` or `:` is used to identify -a host), like this: +Private registries are possible by hosting [your own +registry](https://github.com/dotcloud/docker-registry). + +> **Note**: +> You can also use private repositories on +> [Docker.io](https://index.docker.io/plans/). + +To push or pull to a repository on your own registry, you must prefix +the tag with the address of the registry's host (a `.` or `:` is used to +identify a host), like this: # Tag to create a repository with the full registry location. # The location (e.g. localhost.localdomain:5000) becomes @@ -187,9 +206,9 @@ a host), like this: Once a repository has your registry's host name as part of the tag, you can push and pull it like any other repository, but it will **not** be -searchable (or indexed at all) in the Central Index, and there will be +searchable (or indexed at all) on [Docker.io](http://index.docker.io), and there will be no user name checking performed. Your registry will function completely -independently from the Central Index. +independently from the [Docker.io](http://index.docker.io) registry. 
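As a worked illustration of the private-registry workflow the renamed docs describe above, here is a minimal shell sketch. The registry address `localhost.localdomain:5000` and the repository name `myname/kickassapp` are illustrative assumptions carried over from the docs' own placeholders, not values the patch mandates:

    # assumption: a registry built from github.com/dotcloud/docker-registry
    # is already listening on localhost.localdomain:5000
    # tag an existing local image so its repository name carries the registry host
    sudo docker tag myname/kickassapp localhost.localdomain:5000/myname/kickassapp
    # push to (and later pull from) the self-hosted registry instead of Docker.io
    sudo docker push localhost.localdomain:5000/myname/kickassapp
    sudo docker pull localhost.localdomain:5000/myname/kickassapp

Because the host name is part of the tag, search and user-name checking on Docker.io are bypassed entirely, which is exactly the independence the docs call out.
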
From e9a42a45bfe296850a72ba6ee5e7c0c04534ea34 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Thu, 24 Apr 2014 18:22:22 -0700 Subject: [PATCH 077/219] Fixes #5152 : symlink in volume path Docker-DCO-1.1-Signed-off-by: Tibor Vass (github: tiborvass) --- daemon/volumes.go | 21 ++++++--- integration-cli/docker_cli_run_test.go | 43 +++++++++++++++++++ .../TestVolumeWithSymlink/Dockerfile | 3 ++ 3 files changed, 62 insertions(+), 5 deletions(-) create mode 100644 integration-cli/run_tests/TestVolumeWithSymlink/Dockerfile diff --git a/daemon/volumes.go b/daemon/volumes.go index 4a5c4475b7..66266ef061 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -217,15 +217,26 @@ func createVolumes(container *Container) error { srcPath = p } - container.Volumes[volPath] = srcPath - container.VolumesRW[volPath] = srcRW - // Create the mountpoint - volPath = filepath.Join(container.basefs, volPath) - rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.basefs) + rootVolPath, err := utils.FollowSymlinkInScope(filepath.Join(container.basefs, volPath), container.basefs) if err != nil { return err } + + newVolPath, err := filepath.Rel(container.basefs, rootVolPath) + if err != nil { + return err + } + newVolPath = "/" + newVolPath + + if volPath != newVolPath { + delete(container.Volumes, volPath) + delete(container.VolumesRW, volPath) + } + + container.Volumes[newVolPath] = srcPath + container.VolumesRW[newVolPath] = srcRW + if err := createIfNotExists(rootVolPath, volIsDir); err != nil { return err } diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 5973f2fe1b..4a770bef1e 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "os/exec" + "path/filepath" "regexp" "sort" "strings" @@ -424,6 +425,48 @@ func TestCreateVolume(t *testing.T) { logDone("run - create docker mangaed volume") } +// Test that creating a volume with a symlink in its path works correctly. Test for #5152. +// Note that this bug happens only with symlinks with a target that starts with '/'. 
+func TestVolumeWithSymlink(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "run_tests", "TestVolumeWithSymlink") + buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-volumewithsymlink", ".") + buildCmd.Dir = buildDirectory + err := buildCmd.Run() + if err != nil { + t.Fatal("could not build 'docker-test-volumewithsymlink': %v", err) + } + + cmd := exec.Command(dockerBinary, "run", "-v", "/bar/foo", "--name", "test-volumewithsymlink", "docker-test-volumewithsymlink", "sh", "-c", "mount | grep -q /foo/foo") + exitCode, err := runCommand(cmd) + if err != nil || exitCode != 0 { + t.Fatal("[run] err: %v, exitcode: %d", err, exitCode) + } + + var volPath string + cmd = exec.Command(dockerBinary, "inspect", "-f", "{{range .Volumes}}{{.}}{{end}}", "test-volumewithsymlink") + volPath, exitCode, err = runCommandWithOutput(cmd) + if err != nil || exitCode != 0 { + t.Fatal("[inspect] err: %v, exitcode: %d", err, exitCode) + } + + cmd = exec.Command(dockerBinary, "rm", "-v", "test-volumewithsymlink") + exitCode, err = runCommand(cmd) + if err != nil || exitCode != 0 { + t.Fatal("[rm] err: %v, exitcode: %d", err, exitCode) + } + + f, err := os.Open(volPath) + defer f.Close() + if !os.IsNotExist(err) { + t.Fatal("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath) + } + + deleteImages("docker-test-volumewithsymlink") + deleteAllContainers() + + logDone("run - volume with symlink") +} + func TestExitCode(t *testing.T) { cmd := exec.Command(dockerBinary, "run", "busybox", "/bin/sh", "-c", "exit 72") diff --git a/integration-cli/run_tests/TestVolumeWithSymlink/Dockerfile b/integration-cli/run_tests/TestVolumeWithSymlink/Dockerfile new file mode 100644 index 0000000000..46bed8540b --- /dev/null +++ b/integration-cli/run_tests/TestVolumeWithSymlink/Dockerfile @@ -0,0 +1,3 @@ +FROM busybox + +RUN mkdir /foo && ln -s /foo /bar From ff7b52abd3f26d9650c2e674400d58fbe8157ad8 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Wed, 9 Apr 2014 21:50:46 -0400 Subject: [PATCH 078/219] Fixes permissions on volumes when dir in container is empty Docker-DCO-1.1-Signed-off-by: Brian Goff (github: cpuguy83) --- daemon/volumes.go | 30 +++++++++++++++--------------- integration/container_test.go | 28 +++++++++++++++++++++++++++- 2 files changed, 42 insertions(+), 16 deletions(-) diff --git a/daemon/volumes.go b/daemon/volumes.go index 4a5c4475b7..19bb7cab3f 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -246,22 +246,22 @@ func createVolumes(container *Container) error { if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil { return err } + } + } - var stat syscall.Stat_t - if err := syscall.Stat(rootVolPath, &stat); err != nil { - return err - } - var srcStat syscall.Stat_t - if err := syscall.Stat(srcPath, &srcStat); err != nil { - return err - } - // Change the source volume's ownership if it differs from the root - // files that were just copied - if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid { - if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil { - return err - } - } + var stat syscall.Stat_t + if err := syscall.Stat(rootVolPath, &stat); err != nil { + return err + } + var srcStat syscall.Stat_t + if err := syscall.Stat(srcPath, &srcStat); err != nil { + return err + } + // Change the source volume's ownership if it differs from the root + // files that were just copied + if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid { + if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil { + return 
err } } } diff --git a/integration/container_test.go b/integration/container_test.go index 67b2783ce9..8fe52a3cd6 100644 --- a/integration/container_test.go +++ b/integration/container_test.go @@ -407,7 +407,7 @@ func TestCopyVolumeUidGid(t *testing.T) { defer r.Nuke() // Add directory not owned by root - container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t) + container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test && chown daemon.daemon /hello"}, t) defer r.Destroy(container1) if container1.State.IsRunning() { @@ -432,6 +432,32 @@ func TestCopyVolumeUidGid(t *testing.T) { if !strings.Contains(stdout1, "daemon daemon") { t.Fatal("Container failed to transfer uid and gid to volume") } + + container2, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && chown daemon.daemon /hello"}, t) + defer r.Destroy(container1) + + if container2.State.IsRunning() { + t.Errorf("Container shouldn't be running") + } + if err := container2.Run(); err != nil { + t.Fatal(err) + } + if container2.State.IsRunning() { + t.Errorf("Container shouldn't be running") + } + + img2, err := r.Commit(container2, "", "", "unit test commited image", "", nil) + if err != nil { + t.Error(err) + } + + // Test that the uid and gid is copied from the image to the volume + tmpDir2 := tempDir(t) + defer os.RemoveAll(tmpDir2) + stdout2, _ := runContainer(eng, r, []string{"-v", "/hello", img2.ID, "stat", "-c", "%U %G", "/hello"}, t) + if !strings.Contains(stdout2, "daemon daemon") { + t.Fatal("Container failed to transfer uid and gid to volume") + } } // Test for #1582 From d98069030dc842741fdff16e1818f2a34ec0167f Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Mon, 28 Apr 2014 16:46:03 -0600 Subject: [PATCH 079/219] Remove "root" and "" special cases in libcontainer These are unnecessary since the user package handles these cases properly already (as evidenced by the LXC backend not having these special cases). I also updated the errors returned to match the other libcontainer error messages in this same file. Also, switching from Setresuid to Setuid directly isn't a problem, because the "setuid" system call will automatically do that if our own effective UID is root currently: (from `man 2 setuid`) setuid() sets the effective user ID of the calling process. If the effective UID of the caller is root, the real UID and saved set-user- ID are also set. 
Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- pkg/libcontainer/nsinit/init.go | 37 +++++++++++---------------------- 1 file changed, 12 insertions(+), 25 deletions(-) diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 67095fdba1..4e50bc513b 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -83,31 +83,18 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol } func setupUser(container *libcontainer.Container) error { - switch container.User { - case "root", "": - if err := system.Setgroups(nil); err != nil { - return err - } - if err := system.Setresgid(0, 0, 0); err != nil { - return err - } - if err := system.Setresuid(0, 0, 0); err != nil { - return err - } - default: - uid, gid, suppGids, err := user.GetUserGroupSupplementary(container.User, syscall.Getuid(), syscall.Getgid()) - if err != nil { - return err - } - if err := system.Setgroups(suppGids); err != nil { - return err - } - if err := system.Setgid(gid); err != nil { - return err - } - if err := system.Setuid(uid); err != nil { - return err - } + uid, gid, suppGids, err := user.GetUserGroupSupplementary(container.User, syscall.Getuid(), syscall.Getgid()) + if err != nil { + return fmt.Errorf("GetUserGroupSupplementary %s", err) + } + if err := system.Setgroups(suppGids); err != nil { + return fmt.Errorf("setgroups %s", err) + } + if err := system.Setgid(gid); err != nil { + return fmt.Errorf("setgid %s", err) + } + if err := system.Setuid(uid); err != nil { + return fmt.Errorf("setuid %s", err) } return nil } From 76f95294a316c1b545abe1fd17536da74779490b Mon Sep 17 00:00:00 2001 From: Victor Marmol Date: Fri, 25 Apr 2014 00:37:58 +0000 Subject: [PATCH 080/219] Adding a unit test for pkg/cgroup/fs/memory.go Docker-DCO-1.1-Signed-off-by: Victor Marmol (github: vmarmol) --- pkg/cgroups/fs/memory_test.go | 123 ++++++++++++++++++++++++++++++++++ 1 file changed, 123 insertions(+) create mode 100644 pkg/cgroups/fs/memory_test.go diff --git a/pkg/cgroups/fs/memory_test.go b/pkg/cgroups/fs/memory_test.go new file mode 100644 index 0000000000..6c1fb735e9 --- /dev/null +++ b/pkg/cgroups/fs/memory_test.go @@ -0,0 +1,123 @@ +package fs + +import ( + "testing" +) + +const ( + memoryStatContents = `cache 512 +rss 1024` + memoryUsageContents = "2048\n" + memoryMaxUsageContents = "4096\n" +) + +func TestMemoryStats(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &memoryGroup{} + stats, err := memory.Stats(helper.CgroupData) + if err != nil { + t.Fatal(err) + } + expectedStats := map[string]float64{"cache": 512.0, "rss": 1024.0, "usage_in_bytes": 2048.0, "max_usage_in_bytes": 4096.0} + expectStats(t, expectedStats, stats) +} + +func TestMemoryStatsNoStatFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &memoryGroup{} + _, err := memory.Stats(helper.CgroupData) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsNoUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": 
memoryStatContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &memoryGroup{} + _, err := memory.Stats(helper.CgroupData) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsNoMaxUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + }) + + memory := &memoryGroup{} + _, err := memory.Stats(helper.CgroupData) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadStatFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": "rss rss", + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &memoryGroup{} + _, err := memory.Stats(helper.CgroupData) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": "bad", + "memory.max_usage_in_bytes": memoryMaxUsageContents, + }) + + memory := &memoryGroup{} + _, err := memory.Stats(helper.CgroupData) + if err == nil { + t.Fatal("Expected failure") + } +} + +func TestMemoryStatsBadMaxUsageFile(t *testing.T) { + helper := NewCgroupTestUtil("memory", t) + defer helper.cleanup() + helper.writeFileContents(map[string]string{ + "memory.stat": memoryStatContents, + "memory.usage_in_bytes": memoryUsageContents, + "memory.max_usage_in_bytes": "bad", + }) + + memory := &memoryGroup{} + _, err := memory.Stats(helper.CgroupData) + if err == nil { + t.Fatal("Expected failure") + } +} From 314818e7ba4c675b63caaaabadcfd12d10c4f3a6 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Mon, 28 Apr 2014 17:08:03 -0600 Subject: [PATCH 081/219] Update the "cgroup-lite" dep in our deb package from "Suggests" to "Recommends" and add "cgroupfs-mount" as another alternative I tested to verify that if neither package is available (for example, on Debian Wheezy), apt still continues installing properly. Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/make/ubuntu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/make/ubuntu b/hack/make/ubuntu index ae0f8d6137..751eacf868 100644 --- a/hack/make/ubuntu +++ b/hack/make/ubuntu @@ -135,7 +135,7 @@ EOF --deb-recommends ca-certificates \ --deb-recommends git \ --deb-recommends xz-utils \ - --deb-suggests cgroup-lite \ + --deb-recommends 'cgroupfs-mount | cgroup-lite' \ --description "$PACKAGE_DESCRIPTION" \ --maintainer "$PACKAGE_MAINTAINER" \ --conflicts docker \ From 61f156d5215b2c9d38e26bbd732c6e9cb9a3208e Mon Sep 17 00:00:00 2001 From: Rohit Jnagal Date: Tue, 29 Apr 2014 00:18:18 +0000 Subject: [PATCH 082/219] Add cpu throttling stats. 
Docker-DCO-1.1-Signed-off-by: Rohit Jnagal (github: rjnagal) --- pkg/cgroups/fs/cpu.go | 27 ++++++++++++++++++++++--- pkg/cgroups/fs/cpu_test.go | 40 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 3 deletions(-) create mode 100644 pkg/cgroups/fs/cpu_test.go diff --git a/pkg/cgroups/fs/cpu.go b/pkg/cgroups/fs/cpu.go index 2664811851..6a7f66c72d 100644 --- a/pkg/cgroups/fs/cpu.go +++ b/pkg/cgroups/fs/cpu.go @@ -1,6 +1,9 @@ package fs import ( + "bufio" + "os" + "path/filepath" "strconv" ) @@ -37,7 +40,25 @@ func (s *cpuGroup) Remove(d *data) error { } func (s *cpuGroup) Stats(d *data) (map[string]float64, error) { - // we can reuse the cpuacct subsystem to get the cpu stats - sys := subsystems["cpuacct"] - return sys.Stats(d) + paramData := make(map[string]float64) + path, err := d.path("cpu") + if err != nil { + return nil, err + } + + f, err := os.Open(filepath.Join(path, "cpu.stat")) + if err != nil { + return nil, err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + t, v, err := getCgroupParamKeyValue(sc.Text()) + if err != nil { + return nil, err + } + paramData[t] = v + } + return paramData, nil } diff --git a/pkg/cgroups/fs/cpu_test.go b/pkg/cgroups/fs/cpu_test.go new file mode 100644 index 0000000000..ed9c0defd2 --- /dev/null +++ b/pkg/cgroups/fs/cpu_test.go @@ -0,0 +1,40 @@ +package fs + +import ( + "testing" +) + +func TestCpuStats(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + cpuStatContent := `nr_periods 2000 + nr_throttled 200 + throttled_time 42424242424` + helper.writeFileContents(map[string]string{ + "cpu.stat": cpuStatContent, + }) + + cpu := &cpuGroup{} + stats, err := cpu.Stats(helper.CgroupData) + if err != nil { + t.Fatal(err) + } + + expected_stats := map[string]float64{ + "nr_periods": 2000.0, + "nr_throttled": 200.0, + "throttled_time": 42424242424.0, + } + expectStats(t, expected_stats, stats) +} + +func TestNoCpuStatFile(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + + cpu := &cpuGroup{} + _, err := cpu.Stats(helper.CgroupData) + if err == nil { + t.Fatal("Expected to fail, but did not.") + } +} From d724242297bf2981ad9c7745e5b130ab7fa8f067 Mon Sep 17 00:00:00 2001 From: Rohit Jnagal Date: Tue, 29 Apr 2014 00:32:05 +0000 Subject: [PATCH 083/219] Another test to check for invalid stats. 
Docker-DCO-1.1-Signed-off-by: Rohit Jnagal (github: rjnagal) --- pkg/cgroups/fs/cpu_test.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/pkg/cgroups/fs/cpu_test.go b/pkg/cgroups/fs/cpu_test.go index ed9c0defd2..698ae921d8 100644 --- a/pkg/cgroups/fs/cpu_test.go +++ b/pkg/cgroups/fs/cpu_test.go @@ -38,3 +38,20 @@ func TestNoCpuStatFile(t *testing.T) { t.Fatal("Expected to fail, but did not.") } } + +func TestInvalidCpuStat(t *testing.T) { + helper := NewCgroupTestUtil("cpu", t) + defer helper.cleanup() + cpuStatContent := `nr_periods 2000 + nr_throttled 200 + throttled_time fortytwo` + helper.writeFileContents(map[string]string{ + "cpu.stat": cpuStatContent, + }) + + cpu := &cpuGroup{} + _, err := cpu.Stats(helper.CgroupData) + if err == nil { + t.Fatal("Expected failed stat parsing.") + } +} From 33f36177e9648b182415a7928d3b4b5dc9d0265e Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 29 Apr 2014 13:33:22 +1000 Subject: [PATCH 084/219] add redirects from index/ -> docker-io/ and for the docker-io_api too Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/s3_website.json | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/s3_website.json b/docs/s3_website.json index bb68b6652c..89e3ecd3cf 100644 --- a/docs/s3_website.json +++ b/docs/s3_website.json @@ -10,6 +10,8 @@ { "Condition": { "KeyPrefixEquals": "en/master/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } }, { "Condition": { "KeyPrefixEquals": "en/v0.6.3/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } }, { "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "ReplaceKeyPrefixWith": "jsearch/" } } + { "Condition": { "KeyPrefixEquals": "index/" }, "Redirect": { "ReplaceKeyPrefixWith": "docker-io/" } }, + { "Condition": { "KeyPrefixEquals": "reference/api/index_api/" }, "Redirect": { "ReplaceKeyPrefixWith": "reference/api/docker-io_api/" } }, ] } From 17fbe3de381c70ef852f8d1c50128299615d1ebe Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Mon, 28 Apr 2014 18:45:03 +1000 Subject: [PATCH 085/219] remove the sphinx validation - we'll add a MarkDown one when we have it Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- .travis.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 832a8dd477..ae03d6cde5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,11 +10,9 @@ install: true before_script: - env | sort - - sudo pip install -r docs/requirements.txt script: - hack/make.sh validate-dco - hack/make.sh validate-gofmt - - make -sC docs SPHINXOPTS=-qW docs man # vim:set sw=2 ts=2: From e4114e6b946dfc0f433fa0f6a9de42f80656ce08 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Mon, 28 Apr 2014 22:22:31 -0600 Subject: [PATCH 086/219] Update some whitespace in hack/make/test-integration-cli for consistency Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/make/test-integration-cli | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli index 703c9cd95c..92d1373f59 100644 --- a/hack/make/test-integration-cli +++ b/hack/make/test-integration-cli @@ -13,12 +13,12 @@ bundle_test_integration_cli() { # subshell so that we can export PATH without breaking other things ( export PATH="$DEST/../binary:$DEST/../dynbinary:$PATH" - + if ! 
command -v docker &> /dev/null; then echo >&2 'error: binary or dynbinary must be run before test-integration-cli' false fi - + ( set -x; exec \ docker --daemon --debug \ --storage-driver "$DOCKER_GRAPHDRIVER" \ @@ -26,10 +26,10 @@ bundle_test_integration_cli() { --pidfile "$DEST/docker.pid" \ &> "$DEST/docker.log" ) & - + # pull the busybox image before running the tests sleep 2 - + if ! docker inspect busybox &> /dev/null; then if [ -d /docker-busybox ]; then ( set -x; docker build -t busybox /docker-busybox ) @@ -39,7 +39,7 @@ bundle_test_integration_cli() { fi bundle_test_integration_cli - + DOCKERD_PID=$(set -x; cat $DEST/docker.pid) ( set -x; kill $DOCKERD_PID ) wait $DOCKERD_PID || true From 46492ee65a26c8b3a138f4b9f5eea0e648dc8b45 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Mon, 28 Apr 2014 21:25:26 -0700 Subject: [PATCH 087/219] Remove obsolete 'bootcamp' project. It was a nice idea to recruit more maintainers but we never found the time to do it properly... I am still interested in any ideas to make it easier to start contributing! Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- hack/bootcamp/README.md | 91 ----------------------------------------- 1 file changed, 91 deletions(-) delete mode 100644 hack/bootcamp/README.md diff --git a/hack/bootcamp/README.md b/hack/bootcamp/README.md deleted file mode 100644 index 2c3d356daf..0000000000 --- a/hack/bootcamp/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# Docker maintainer bootcamp - -## Introduction: we need more maintainers - -Docker is growing incredibly fast. At the time of writing, it has received over 200 contributions from 90 people, -and its API is used by dozens of 3rd-party tools. Over 1,000 issues have been opened. As the first production deployments -start going live, the growth will only accelerate. - -Also at the time of writing, Docker has 3 full-time maintainers, and 7 part-time subsystem maintainers. If docker -is going to live up to the expectations, we need more than that. - -This document describes a *bootcamp* to guide and train volunteers interested in helping the project, either with individual -contributions, maintainer work, or both. - -This bootcamp is an experiment. If you decide to go through it, consider yourself an alpha-tester. You should expect quirks, -and report them to us as you encounter them to help us smooth out the process. - - -## How it works - -The maintainer bootcamp is a 12-step program - one step for each of the maintainer's responsibilities. The aspiring maintainer must -validate all 12 steps by 1) studying it, 2) practicing it, and 3) getting endorsed for it. - -Steps are all equally important and can be validated in any order. Validating all 12 steps is a pre-requisite for becoming a core -maintainer, but even 1 step will make you a better contributor! - -### List of steps - -#### 1) Be a power user - -Use docker daily, build cool things with it, know its quirks inside and out. - - -#### 2) Help users - -Answer questions on irc, twitter, email, in person. - - -#### 3) Manage the bug tracker - -Help triage tickets - ask the right questions, find duplicates, reference relevant resources, know when to close a ticket when necessary, take the time to go over older tickets. - - -#### 4) Improve the documentation - -Follow the documentation from scratch regularly and make sure it is still up-to-date. Find and fix inconsistencies. Remove stale information. Find a frequently asked question that is not documented. Simplify the content and the form. 
- - -#### 5) Evangelize the principles of docker - -Understand what the underlying goals and principle of docker are. Explain design decisions based on what docker is, and what it is not. When someone is not using docker, find how docker can be valuable to them. If they are using docker, find how they can use it better. - - -#### 6) Fix bugs - -Self-explanatory. Contribute improvements to docker which solve defects. Bugfixes should be well-tested, and prioritized by impact to the user. - - -#### 7) Improve the testing infrastructure - -Automated testing is complicated and should be perpetually improved. Invest time to improve the current tooling. Refactor existing tests, create new ones, make testing more accessible to developers, add new testing capabilities (integration tests, mocking, stress test...), improve integration between tests and documentation... - - -#### 8) Contribute features - -Improve docker to do more things, or get better at doing the same things. Features should be well-tested, not break existing APIs, respect the project goals. They should make the user's life measurably better. Features should be discussed ahead of time to avoid wasting time and duplicating effort. - - -#### 9) Refactor internals - -Improve docker to repay technical debt. Simplify code layout, improve performance, add missing comments, reduce the number of files and functions, rename functions and variables to be more readable, go over FIXMEs, etc. - -#### 10) Review and merge contributions - -Review pull requests in a timely manner, review code in detail and offer feedback. Keep a high bar without being pedantic. Share the load of testing and merging pull requests. - -#### 11) Release - -Manage a release of docker from beginning to end. Tests, final review, tags, builds, upload to mirrors, distro packaging, etc. - -#### 12) Train other maintainers - -Contribute to training other maintainers. Give advice, delegate work, help organize the bootcamp. This also means contribute to the maintainer's manual, look for ways to improve the project organization etc. - -### How to study a step - -### How to practice a step - -### How to get endorsed for a step - - From c8381d672226dd17f2e17f9cf9378f06d533911c Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Mon, 28 Apr 2014 22:23:22 -0600 Subject: [PATCH 088/219] Use "docker load" to create "scratch" in hack/make/test-integration-cli (instead of implicitly pulling it from the index) Creating the "docker save" tarball for "scratch" is pretty simple. I've also extrapolated the "docker build -t busybox ." logic into a separate "hack/make/.ensure-busybox" file so that it can eventually be reused easier. Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/make/.ensure-busybox | 10 ++++++++++ hack/make/.ensure-scratch | 21 +++++++++++++++++++++ hack/make/test-integration-cli | 8 +------- 3 files changed, 32 insertions(+), 7 deletions(-) create mode 100644 hack/make/.ensure-busybox create mode 100644 hack/make/.ensure-scratch diff --git a/hack/make/.ensure-busybox b/hack/make/.ensure-busybox new file mode 100644 index 0000000000..3861faaf11 --- /dev/null +++ b/hack/make/.ensure-busybox @@ -0,0 +1,10 @@ +#!/bin/bash + +if ! 
docker inspect busybox &> /dev/null; then + if [ -d /docker-busybox ]; then + source "$(dirname "$BASH_SOURCE")/.ensure-scratch" + ( set -x; docker build -t busybox /docker-busybox ) + else + ( set -x; docker pull busybox ) + fi +fi diff --git a/hack/make/.ensure-scratch b/hack/make/.ensure-scratch new file mode 100644 index 0000000000..487e85ae27 --- /dev/null +++ b/hack/make/.ensure-scratch @@ -0,0 +1,21 @@ +#!/bin/bash + +if ! docker inspect scratch &> /dev/null; then + # let's build a "docker save" tarball for "scratch" + # see https://github.com/dotcloud/docker/pull/5262 + # and also https://github.com/dotcloud/docker/issues/4242 + mkdir -p /docker-scratch + ( + cd /docker-scratch + echo '{"scratch":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories + mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + ( + cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json + echo '1.0' > VERSION + tar -cf layer.tar --files-from /dev/null + ) + ) + ( set -x; tar -cf /docker-scratch.tar -C /docker-scratch . ) + ( set -x; docker load --input /docker-scratch.tar ) +fi diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli index 92d1373f59..adb9be024c 100644 --- a/hack/make/test-integration-cli +++ b/hack/make/test-integration-cli @@ -30,13 +30,7 @@ bundle_test_integration_cli() { # pull the busybox image before running the tests sleep 2 - if ! docker inspect busybox &> /dev/null; then - if [ -d /docker-busybox ]; then - ( set -x; docker build -t busybox /docker-busybox ) - else - ( set -x; docker pull busybox ) - fi - fi + source "$(dirname "$BASH_SOURCE")/.ensure-busybox" bundle_test_integration_cli From b1fe1797f3a808f38cec3d0b9a22556646163441 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Mon, 28 Apr 2014 23:13:25 -0600 Subject: [PATCH 089/219] Update hack/dind to match the rest of our scripts No functional changes here, just coding style and maintainability. Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/dind | 60 +++++++++++++++++++++++++++++++------------------------ 1 file changed, 34 insertions(+), 26 deletions(-) diff --git a/hack/dind b/hack/dind index 94147f5324..06b847f530 100755 --- a/hack/dind +++ b/hack/dind @@ -1,4 +1,5 @@ #!/bin/bash +set -e # DinD: a wrapper script which allows docker to be run inside a docker container. # Original version by Jerome Petazzoni @@ -12,29 +13,28 @@ # First, make sure that cgroups are mounted correctly. CGROUP=/sys/fs/cgroup -[ -d $CGROUP ] || - mkdir $CGROUP +mkdir -p "$CGROUP" -mountpoint -q $CGROUP || +if ! mountpoint -q "$CGROUP"; then mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || { - echo "Could not make a tmpfs mount. Did you use --privileged?" + echo >&2 'Could not make a tmpfs mount. Did you use --privileged?' exit 1 } +fi -if [ -d /sys/kernel/security ] && ! 
mountpoint -q /sys/kernel/security -then - mount -t securityfs none /sys/kernel/security || { - echo "Could not mount /sys/kernel/security." - echo "AppArmor detection and -privileged mode might break." - } +if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then + mount -t securityfs none /sys/kernel/security || { + echo >&2 'Could not mount /sys/kernel/security.' + echo >&2 'AppArmor detection and -privileged mode might break.' + } fi # Mount the cgroup hierarchies exactly as they are in the parent system. -for SUBSYS in $(cut -d: -f2 /proc/1/cgroup) -do - [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS - mountpoint -q $CGROUP/$SUBSYS || - mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS +for SUBSYS in $(cut -d: -f2 /proc/1/cgroup); do + mkdir -p "$CGROUP/$SUBSYS" + if ! mountpoint -q $CGROUP/$SUBSYS; then + mount -n -t cgroup -o "$SUBSYS" cgroup "$CGROUP/$SUBSYS" + fi # The two following sections address a bug which manifests itself # by a cryptic "lxc-start: no ns_cgroup option specified" when @@ -49,26 +49,30 @@ do # Systemd and OpenRC (and possibly others) both create such a # cgroup. To avoid the aforementioned bug, we symlink "foo" to # "name=foo". This shouldn't have any adverse effect. - echo $SUBSYS | grep -q ^name= && { - NAME=$(echo $SUBSYS | sed s/^name=//) - ln -s $SUBSYS $CGROUP/$NAME - } + name="${SUBSYS#name=}" + if [ "$name" != "$SUBSYS" ]; then + ln -s "$SUBSYS" "$CGROUP/$name" + fi # Likewise, on at least one system, it has been reported that # systemd would mount the CPU and CPU accounting controllers # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu" # but on a directory called "cpu,cpuacct" (note the inversion # in the order of the groups). This tries to work around it. - [ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct + if [ "$SUBSYS" = 'cpuacct,cpu' ]; then + ln -s "$SUBSYS" "$CGROUP/cpu,cpuacct" + fi done # Note: as I write those lines, the LXC userland tools cannot setup # a "sub-container" properly if the "devices" cgroup is not in its # own hierarchy. Let's detect this and issue a warning. -grep -q :devices: /proc/1/cgroup || - echo "WARNING: the 'devices' cgroup should be in its own hierarchy." -grep -qw devices /proc/1/cgroup || - echo "WARNING: it looks like the 'devices' cgroup is not mounted." +if ! grep -q :devices: /proc/1/cgroup; then + echo >&2 'WARNING: the "devices" cgroup should be in its own hierarchy.' +fi +if ! grep -qw devices /proc/1/cgroup; then + echo >&2 'WARNING: it looks like the "devices" cgroup is not mounted.' +fi # Now, close extraneous file descriptors. pushd /proc/self/fd >/dev/null @@ -89,5 +93,9 @@ popd >/dev/null # Mount /tmp mount -t tmpfs none /tmp -[ "$1" ] && exec "$@" -echo "You probably want to run hack/make.sh, or maybe a shell?" +if [ $# -gt 0 ]; then + exec "$@" +fi + +echo >&2 'ERROR: No command specified.' +echo >&2 'You probably want to run hack/make.sh, or maybe a shell?' 
From 44d54ba0c299540efbfa173bf484d541e857f4ac Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 29 Apr 2014 02:01:07 -0700 Subject: [PATCH 090/219] Use proper scheme with static registry Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- registry/registry.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/registry/registry.go b/registry/registry.go index 88defdc7bb..1bd73cdeb5 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -6,7 +6,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/dotcloud/docker/utils" "io" "io/ioutil" "net" @@ -17,6 +16,8 @@ import ( "strconv" "strings" "time" + + "github.com/dotcloud/docker/utils" ) var ( @@ -372,7 +373,11 @@ func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) { } } else { // Assume the endpoint is on the same host - endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, req.URL.Host)) + u, err := url.Parse(indexEp) + if err != nil { + return nil, err + } + endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", u.Scheme, req.URL.Host)) } checksumsJSON, err := ioutil.ReadAll(res.Body) From f0e6e135a8d733af173bf0b8732c704c9ec716d7 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 17 Apr 2014 23:47:27 +0000 Subject: [PATCH 091/219] Initial work on selinux patch This has every container using the docker daemon's pid for the processes label so it does not work correctly. Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/container.go | 20 ++++-- daemon/daemon.go | 15 ++-- daemon/execdriver/native/create.go | 12 +--- daemon/graphdriver/aufs/aufs.go | 31 ++++---- daemon/graphdriver/aufs/aufs_test.go | 78 ++++++++++----------- daemon/graphdriver/aufs/migrate.go | 8 +-- daemon/graphdriver/btrfs/btrfs.go | 10 +-- daemon/graphdriver/devmapper/deviceset.go | 8 ++- daemon/graphdriver/devmapper/driver.go | 4 +- daemon/graphdriver/devmapper/driver_test.go | 45 ++++++------ daemon/graphdriver/driver.go | 4 +- daemon/graphdriver/vfs/driver.go | 6 +- daemon/volumes.go | 2 +- daemonconfig/config.go | 3 +- docker/docker.go | 2 + graph/graph.go | 8 +-- image/image.go | 6 +- integration/graph_test.go | 2 +- integration/runtime_test.go | 4 +- pkg/label/label_selinux.go | 8 +-- pkg/libcontainer/nsinit/init.go | 3 +- 21 files changed, 148 insertions(+), 131 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index c06fd2c074..4416a4c212 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -11,6 +11,7 @@ import ( "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/links" "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/pkg/selinux" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" @@ -64,7 +65,8 @@ type Container struct { stdin io.ReadCloser stdinPipe io.WriteCloser - daemon *Daemon + daemon *Daemon + mountLabel, processLabel string waitLock chan struct{} Volumes map[string]string @@ -320,9 +322,11 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s func populateCommand(c *Container, env []string) { var ( - en *execdriver.Network - driverConfig = make(map[string][]string) + en *execdriver.Network + context = make(map[string][]string) ) + context["process_label"] = []string{c.processLabel} + context["mount_label"] = []string{c.mountLabel} en = &execdriver.Network{ Mtu: c.daemon.config.Mtu, @@ -340,7 +344,7 @@ func populateCommand(c *Container, env []string) { } // TODO: this can be removed after lxc-conf is fully 
deprecated - mergeLxcConfIntoOptions(c.hostConfig, driverConfig) + mergeLxcConfIntoOptions(c.hostConfig, context) resources := &execdriver.Resources{ Memory: c.Config.Memory, @@ -358,7 +362,7 @@ func populateCommand(c *Container, env []string) { Network: en, Tty: c.Config.Tty, User: c.Config.User, - Config: driverConfig, + Config: context, Resources: resources, } c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true} @@ -383,6 +387,12 @@ func (container *Container) Start() (err error) { if err := container.setupContainerDns(); err != nil { return err } + + process, mount := selinux.GetLxcContexts() + + container.mountLabel = mount + container.processLabel = process + if err := container.Mount(); err != nil { return err } diff --git a/daemon/daemon.go b/daemon/daemon.go index 0e4d1a1699..50707fbc98 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -543,10 +543,10 @@ func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error return err } initID := fmt.Sprintf("%s-init", container.ID) - if err := daemon.driver.Create(initID, img.ID, ""); err != nil { + if err := daemon.driver.Create(initID, img.ID); err != nil { return err } - initPath, err := daemon.driver.Get(initID) + initPath, err := daemon.driver.Get(initID, "") if err != nil { return err } @@ -556,7 +556,7 @@ func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error return err } - if err := daemon.driver.Create(container.ID, initID, ""); err != nil { + if err := daemon.driver.Create(container.ID, initID); err != nil { return err } return nil @@ -670,7 +670,6 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D if !config.EnableSelinuxSupport { selinux.SetDisabled() } - // Set the default driver graphdriver.DefaultDriver = config.GraphDriver @@ -840,7 +839,7 @@ func (daemon *Daemon) Close() error { } func (daemon *Daemon) Mount(container *Container) error { - dir, err := daemon.driver.Get(container.ID) + dir, err := daemon.driver.Get(container.ID, container.mountLabel) if err != nil { return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err) } @@ -862,12 +861,12 @@ func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) { if differ, ok := daemon.driver.(graphdriver.Differ); ok { return differ.Changes(container.ID) } - cDir, err := daemon.driver.Get(container.ID) + cDir, err := daemon.driver.Get(container.ID, "") if err != nil { return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err) } defer daemon.driver.Put(container.ID) - initDir, err := daemon.driver.Get(container.ID + "-init") + initDir, err := daemon.driver.Get(container.ID+"-init", "") if err != nil { return nil, fmt.Errorf("Error getting container init rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err) } @@ -885,7 +884,7 @@ func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) { return nil, err } - cDir, err := daemon.driver.Get(container.ID) + cDir, err := daemon.driver.Get(container.ID, "") if err != nil { return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.daemon.driver, err) } diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index f724ef67e6..00e6fc4b26 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -8,7 +8,6 @@ import ( "github.com/dotcloud/docker/daemon/execdriver/native/configuration" 
"github.com/dotcloud/docker/daemon/execdriver/native/template" "github.com/dotcloud/docker/pkg/apparmor" - "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" ) @@ -119,14 +118,7 @@ func (d *driver) setupMounts(container *libcontainer.Container, c *execdriver.Co } func (d *driver) setupLabels(container *libcontainer.Container, c *execdriver.Command) error { - labels := c.Config["label"] - if len(labels) > 0 { - process, mount, err := label.GenLabels(labels[0]) - if err != nil { - return err - } - container.Context["mount_label"] = mount - container.Context["process_label"] = process - } + container.Context["process_label"] = c.Config["process_label"][0] + container.Context["mount_label"] = c.Config["mount_label"][0] return nil } diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go index 8363c24a5e..12b7a77fb3 100644 --- a/daemon/graphdriver/aufs/aufs.go +++ b/daemon/graphdriver/aufs/aufs.go @@ -25,6 +25,7 @@ import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/daemon/graphdriver" + "github.com/dotcloud/docker/pkg/label" mountpk "github.com/dotcloud/docker/pkg/mount" "github.com/dotcloud/docker/utils" "os" @@ -134,7 +135,7 @@ func (a Driver) Exists(id string) bool { // Three folders are created for each id // mnt, layers, and diff -func (a *Driver) Create(id, parent string, mountLabel string) error { +func (a *Driver) Create(id, parent string) error { if err := a.createDirsFor(id); err != nil { return err } @@ -218,7 +219,7 @@ func (a *Driver) Remove(id string) error { // Return the rootfs path for the id // This will mount the dir at it's given path -func (a *Driver) Get(id string) (string, error) { +func (a *Driver) Get(id, mountLabel string) (string, error) { ids, err := getParentIds(a.rootPath(), id) if err != nil { if !os.IsNotExist(err) { @@ -240,7 +241,7 @@ func (a *Driver) Get(id string) (string, error) { out = path.Join(a.rootPath(), "mnt", id) if count == 0 { - if err := a.mount(id); err != nil { + if err := a.mount(id, mountLabel); err != nil { return "", err } } @@ -309,7 +310,7 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) { return layers, nil } -func (a *Driver) mount(id string) error { +func (a *Driver) mount(id, mountLabel string) error { // If the id is mounted or we get an error return if mounted, err := a.mounted(id); err != nil || mounted { return err @@ -325,7 +326,7 @@ func (a *Driver) mount(id string) error { return err } - if err := a.aufsMount(layers, rw, target); err != nil { + if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { return err } return nil @@ -358,21 +359,21 @@ func (a *Driver) Cleanup() error { return nil } -func (a *Driver) aufsMount(ro []string, rw, target string) (err error) { +func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { defer func() { if err != nil { Unmount(target) } }() - if err = a.tryMount(ro, rw, target); err != nil { - if err = a.mountRw(rw, target); err != nil { + if err = a.tryMount(ro, rw, target, mountLabel); err != nil { + if err = a.mountRw(rw, target, mountLabel); err != nil { return } for _, layer := range ro { - branch := fmt.Sprintf("append:%s=ro+wh", layer) - if err = mount("none", target, "aufs", MsRemount, branch); err != nil { + data := label.FormatMountLabel(fmt.Sprintf("append:%s=ro+wh", layer), mountLabel) + if err = mount("none", target, "aufs", MsRemount, data); err != nil { return } } @@ -382,16 +383,18 @@ func (a *Driver) aufsMount(ro []string, rw, target 
string) (err error) { // Try to mount using the aufs fast path, if this fails then // append ro layers. -func (a *Driver) tryMount(ro []string, rw, target string) (err error) { +func (a *Driver) tryMount(ro []string, rw, target, mountLabel string) (err error) { var ( rwBranch = fmt.Sprintf("%s=rw", rw) roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:")) + data = label.FormatMountLabel(fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches), mountLabel) ) - return mount("none", target, "aufs", 0, fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches)) + return mount("none", target, "aufs", 0, data) } -func (a *Driver) mountRw(rw, target string) error { - return mount("none", target, "aufs", 0, fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw)) +func (a *Driver) mountRw(rw, target, mountLabel string) error { + data := label.FormatMountLabel(fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw), mountLabel) + return mount("none", target, "aufs", 0, data) } func rollbackMount(target string, err error) { diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go index 9e01a945aa..1ffa264aa1 100644 --- a/daemon/graphdriver/aufs/aufs_test.go +++ b/daemon/graphdriver/aufs/aufs_test.go @@ -90,7 +90,7 @@ func TestCreateNewDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } } @@ -99,7 +99,7 @@ func TestCreateNewDirStructure(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } @@ -120,7 +120,7 @@ func TestRemoveImage(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } @@ -145,11 +145,11 @@ func TestGetWithoutParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } - diffPath, err := d.Get("1") + diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } @@ -172,7 +172,7 @@ func TestCleanupWithDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } @@ -185,7 +185,7 @@ func TestMountedFalseResponse(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } @@ -204,14 +204,14 @@ func TestMountedTrueReponse(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } - if err := d.Create("2", "1", ""); err != nil { + if err := d.Create("2", "1"); err != nil { t.Fatal(err) } - _, err := d.Get("2") + _, err := d.Get("2", "") if err != nil { t.Fatal(err) } @@ -230,10 +230,10 @@ func TestMountWithParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } - if err := d.Create("2", "1", ""); err != nil { + if err := d.Create("2", "1"); err != nil { t.Fatal(err) } @@ -243,7 +243,7 @@ func TestMountWithParent(t *testing.T) { } }() - mntPath, err := d.Get("2") + mntPath, err := d.Get("2", "") if err != nil { t.Fatal(err) } @@ -261,10 +261,10 @@ func 
TestRemoveMountedDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } - if err := d.Create("2", "1", ""); err != nil { + if err := d.Create("2", "1"); err != nil { t.Fatal(err) } @@ -274,7 +274,7 @@ func TestRemoveMountedDir(t *testing.T) { } }() - mntPath, err := d.Get("2") + mntPath, err := d.Get("2", "") if err != nil { t.Fatal(err) } @@ -300,7 +300,7 @@ func TestCreateWithInvalidParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "docker", ""); err == nil { + if err := d.Create("1", "docker"); err == nil { t.Fatalf("Error should not be nil with parent does not exist") } } @@ -309,11 +309,11 @@ func TestGetDiff(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } - diffPath, err := d.Get("1") + diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } @@ -343,10 +343,10 @@ func TestChanges(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } - if err := d.Create("2", "1", ""); err != nil { + if err := d.Create("2", "1"); err != nil { t.Fatal(err) } @@ -356,7 +356,7 @@ func TestChanges(t *testing.T) { } }() - mntPoint, err := d.Get("2") + mntPoint, err := d.Get("2", "") if err != nil { t.Fatal(err) } @@ -392,10 +392,10 @@ func TestChanges(t *testing.T) { t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) } - if err := d.Create("3", "2", ""); err != nil { + if err := d.Create("3", "2"); err != nil { t.Fatal(err) } - mntPoint, err = d.Get("3") + mntPoint, err = d.Get("3", "") if err != nil { t.Fatal(err) } @@ -437,11 +437,11 @@ func TestDiffSize(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } - diffPath, err := d.Get("1") + diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } @@ -479,11 +479,11 @@ func TestChildDiffSize(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } - diffPath, err := d.Get("1") + diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } @@ -515,7 +515,7 @@ func TestChildDiffSize(t *testing.T) { t.Fatalf("Expected size to be %d got %d", size, diffSize) } - if err := d.Create("2", "1", ""); err != nil { + if err := d.Create("2", "1"); err != nil { t.Fatal(err) } @@ -534,7 +534,7 @@ func TestExists(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } @@ -552,7 +552,7 @@ func TestStatus(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } @@ -581,11 +581,11 @@ func TestApplyDiff(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } - diffPath, err := d.Get("1") + diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } @@ -607,10 +607,10 @@ func TestApplyDiff(t *testing.T) { t.Fatal(err) } - if err := d.Create("2", "", ""); err != nil { + if err := d.Create("2", ""); err != nil { t.Fatal(err) } - if err := 
d.Create("3", "2", ""); err != nil { + if err := d.Create("3", "2"); err != nil { t.Fatal(err) } @@ -620,7 +620,7 @@ func TestApplyDiff(t *testing.T) { // Ensure that the file is in the mount point for id 3 - mountPoint, err := d.Get("3") + mountPoint, err := d.Get("3", "") if err != nil { t.Fatal(err) } @@ -656,11 +656,11 @@ func TestMountMoreThan42Layers(t *testing.T) { } current = hash(current) - if err := d.Create(current, parent, ""); err != nil { + if err := d.Create(current, parent); err != nil { t.Logf("Current layer %d", i) t.Fatal(err) } - point, err := d.Get(current) + point, err := d.Get(current, "") if err != nil { t.Logf("Current layer %d", i) t.Fatal(err) @@ -683,7 +683,7 @@ func TestMountMoreThan42Layers(t *testing.T) { } // Perform the actual mount for the top most image - point, err := d.Get(last) + point, err := d.Get(last, "") if err != nil { t.Fatal(err) } diff --git a/daemon/graphdriver/aufs/migrate.go b/daemon/graphdriver/aufs/migrate.go index 400e260797..dda7cb7390 100644 --- a/daemon/graphdriver/aufs/migrate.go +++ b/daemon/graphdriver/aufs/migrate.go @@ -77,11 +77,11 @@ func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) e } initID := fmt.Sprintf("%s-init", id) - if err := a.Create(initID, metadata.Image, ""); err != nil { + if err := a.Create(initID, metadata.Image); err != nil { return err } - initPath, err := a.Get(initID) + initPath, err := a.Get(initID, "") if err != nil { return err } @@ -90,7 +90,7 @@ func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) e return err } - if err := a.Create(id, initID, ""); err != nil { + if err := a.Create(id, initID); err != nil { return err } } @@ -144,7 +144,7 @@ func (a *Driver) migrateImage(m *metadata, pth string, migrated map[string]bool) return err } if !a.Exists(m.ID) { - if err := a.Create(m.ID, m.ParentID, ""); err != nil { + if err := a.Create(m.ID, m.ParentID); err != nil { return err } } diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go index 494a375817..4d195537eb 100644 --- a/daemon/graphdriver/btrfs/btrfs.go +++ b/daemon/graphdriver/btrfs/btrfs.go @@ -80,7 +80,7 @@ func getDirFd(dir *C.DIR) uintptr { return uintptr(C.dirfd(dir)) } -func subvolCreate(path, name string, mountLabel string) error { +func subvolCreate(path, name string) error { dir, err := openDir(path) if err != nil { return err @@ -155,17 +155,17 @@ func (d *Driver) subvolumesDirId(id string) string { return path.Join(d.subvolumesDir(), id) } -func (d *Driver) Create(id string, parent string, mountLabel string) error { +func (d *Driver) Create(id string, parent string) error { subvolumes := path.Join(d.home, "subvolumes") if err := os.MkdirAll(subvolumes, 0700); err != nil { return err } if parent == "" { - if err := subvolCreate(subvolumes, id, mountLabel); err != nil { + if err := subvolCreate(subvolumes, id); err != nil { return err } } else { - parentDir, err := d.Get(parent) + parentDir, err := d.Get(parent, "") if err != nil { return err } @@ -187,7 +187,7 @@ func (d *Driver) Remove(id string) error { return os.RemoveAll(dir) } -func (d *Driver) Get(id string) (string, error) { +func (d *Driver) Get(id, mountLabel string) (string, error) { dir := d.subvolumesDirId(id) st, err := os.Stat(dir) if err != nil { diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index 640bebd32b..a562210e55 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -845,7 +845,7 @@ 
func (devices *DeviceSet) Shutdown() error { return nil } -func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) error { +func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { info, err := devices.lookupDevice(hash) if err != nil { return err @@ -858,6 +858,7 @@ func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) erro defer devices.Unlock() if info.mountCount > 0 { + fmt.Printf("---> already mounted\n") if path != info.mountPath { return fmt.Errorf("Trying to mount devmapper device in multple places (%s, %s)", info.mountPath, path) } @@ -873,9 +874,12 @@ func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) erro var flags uintptr = sysMsMgcVal mountOptions := label.FormatMountLabel("discard", mountLabel) + fmt.Printf("-----> setting mount label %s\n", mountOptions) + err = sysMount(info.DevName(), path, "ext4", flags, mountOptions) if err != nil && err == sysEInval { - mountOptions = label.FormatMountLabel(mountLabel, "") + mountOptions = label.FormatMountLabel("", mountLabel) + fmt.Printf("-----> setting mount label after error %s\n", mountOptions) err = sysMount(info.DevName(), path, "ext4", flags, mountOptions) } if err != nil { diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go index 66c4cb0767..558feef327 100644 --- a/daemon/graphdriver/devmapper/driver.go +++ b/daemon/graphdriver/devmapper/driver.go @@ -60,7 +60,7 @@ func (d *Driver) Cleanup() error { return d.DeviceSet.Shutdown() } -func (d *Driver) Create(id, parent string, mountLabel string) error { +func (d *Driver) Create(id, parent string) error { if err := d.DeviceSet.AddDevice(id, parent); err != nil { return err } @@ -89,7 +89,7 @@ func (d *Driver) Remove(id string) error { return nil } -func (d *Driver) Get(id string) (string, error) { +func (d *Driver) Get(id, mountLabel string) (string, error) { mp := path.Join(d.home, "mnt", id) // Create the target directories if they don't exist diff --git a/daemon/graphdriver/devmapper/driver_test.go b/daemon/graphdriver/devmapper/driver_test.go index 77e8a6013a..913add7c8b 100644 --- a/daemon/graphdriver/devmapper/driver_test.go +++ b/daemon/graphdriver/devmapper/driver_test.go @@ -436,6 +436,12 @@ func TestDriverCreate(t *testing.T) { return nil } + sysUnmount = func(target string, flag int) error { + //calls["sysUnmount"] = true + + return nil + } + Mounted = func(mnt string) (bool, error) { calls["Mounted"] = true if !strings.HasPrefix(mnt, "/tmp/docker-test-devmapper-") || !strings.HasSuffix(mnt, "/mnt/1") { @@ -494,7 +500,7 @@ func TestDriverCreate(t *testing.T) { "?ioctl.loopctlgetfree", ) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } calls.Assert(t, @@ -542,7 +548,6 @@ func TestDriverRemove(t *testing.T) { return nil } sysUnmount = func(target string, flags int) (err error) { - calls["sysUnmount"] = true // FIXME: compare the exact source and target strings (inodes + devname) if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) @@ -607,7 +612,7 @@ func TestDriverRemove(t *testing.T) { "?ioctl.loopctlgetfree", ) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } @@ -657,21 +662,21 @@ func TestCleanup(t *testing.T) { mountPoints := make([]string, 2) - if err := d.Create("1", "", ""); err != 
nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } // Mount the id - p, err := d.Get("1") + p, err := d.Get("1", "") if err != nil { t.Fatal(err) } mountPoints[0] = p - if err := d.Create("2", "1", ""); err != nil { + if err := d.Create("2", "1"); err != nil { t.Fatal(err) } - p, err = d.Get("2") + p, err = d.Get("2", "") if err != nil { t.Fatal(err) } @@ -720,7 +725,7 @@ func TestNotMounted(t *testing.T) { d := newDriver(t) defer cleanup(d) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } @@ -738,10 +743,10 @@ func TestMounted(t *testing.T) { d := newDriver(t) defer cleanup(d) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } - if _, err := d.Get("1"); err != nil { + if _, err := d.Get("1", ""); err != nil { t.Fatal(err) } @@ -758,10 +763,10 @@ func TestInitCleanedDriver(t *testing.T) { t.Skip("FIXME: not a unit test") d := newDriver(t) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } - if _, err := d.Get("1"); err != nil { + if _, err := d.Get("1", ""); err != nil { t.Fatal(err) } @@ -776,7 +781,7 @@ func TestInitCleanedDriver(t *testing.T) { d = driver.(*Driver) defer cleanup(d) - if _, err := d.Get("1"); err != nil { + if _, err := d.Get("1", ""); err != nil { t.Fatal(err) } } @@ -786,16 +791,16 @@ func TestMountMountedDriver(t *testing.T) { d := newDriver(t) defer cleanup(d) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } // Perform get on same id to ensure that it will // not be mounted twice - if _, err := d.Get("1"); err != nil { + if _, err := d.Get("1", ""); err != nil { t.Fatal(err) } - if _, err := d.Get("1"); err != nil { + if _, err := d.Get("1", ""); err != nil { t.Fatal(err) } } @@ -805,7 +810,7 @@ func TestGetReturnsValidDevice(t *testing.T) { d := newDriver(t) defer cleanup(d) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } @@ -813,7 +818,7 @@ func TestGetReturnsValidDevice(t *testing.T) { t.Fatalf("Expected id 1 to be in device set") } - if _, err := d.Get("1"); err != nil { + if _, err := d.Get("1", ""); err != nil { t.Fatal(err) } @@ -833,11 +838,11 @@ func TestDriverGetSize(t *testing.T) { d := newDriver(t) defer cleanup(d) - if err := d.Create("1", "", ""); err != nil { + if err := d.Create("1", ""); err != nil { t.Fatal(err) } - mountPoint, err := d.Get("1") + mountPoint, err := d.Get("1", "") if err != nil { t.Fatal(err) } diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go index bd4c2faaca..80bf8a0143 100644 --- a/daemon/graphdriver/driver.go +++ b/daemon/graphdriver/driver.go @@ -13,10 +13,10 @@ type InitFunc func(root string) (Driver, error) type Driver interface { String() string - Create(id, parent string, mountLabel string) error + Create(id, parent string) error Remove(id string) error - Get(id string) (dir string, err error) + Get(id, mountLabel string) (dir string, err error) Put(id string) Exists(id string) bool diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go index 40acde7b75..765b21cded 100644 --- a/daemon/graphdriver/vfs/driver.go +++ b/daemon/graphdriver/vfs/driver.go @@ -42,7 +42,7 @@ func copyDir(src, dst string) error { return nil } -func (d *Driver) Create(id string, parent string, mountLabel string) error { +func (d *Driver) Create(id, parent string) error { dir := d.dir(id) if err := 
os.MkdirAll(path.Dir(dir), 0700); err != nil { return err @@ -53,7 +53,7 @@ func (d *Driver) Create(id string, parent string, mountLabel string) error { if parent == "" { return nil } - parentDir, err := d.Get(parent) + parentDir, err := d.Get(parent, "") if err != nil { return fmt.Errorf("%s: %s", parent, err) } @@ -74,7 +74,7 @@ func (d *Driver) Remove(id string) error { return os.RemoveAll(d.dir(id)) } -func (d *Driver) Get(id string) (string, error) { +func (d *Driver) Get(id, mountLabel string) (string, error) { dir := d.dir(id) if st, err := os.Stat(dir); err != nil { return "", err diff --git a/daemon/volumes.go b/daemon/volumes.go index d51219d226..a6570845bf 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -204,7 +204,7 @@ func createVolumes(container *Container) error { if err != nil { return err } - srcPath, err = volumesDriver.Get(c.ID) + srcPath, err = volumesDriver.Get(c.ID, "") if err != nil { return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err) } diff --git a/daemonconfig/config.go b/daemonconfig/config.go index 2803f827f4..87ac52224d 100644 --- a/daemonconfig/config.go +++ b/daemonconfig/config.go @@ -29,6 +29,7 @@ type Config struct { Mtu int DisableNetwork bool EnableSelinuxSupport bool + Context map[string][]string } // ConfigFromJob creates and returns a new DaemonConfig object @@ -46,7 +47,7 @@ func ConfigFromJob(job *engine.Job) *Config { InterContainerCommunication: job.GetenvBool("InterContainerCommunication"), GraphDriver: job.Getenv("GraphDriver"), ExecDriver: job.Getenv("ExecDriver"), - EnableSelinuxSupport: false, // FIXME: hardcoded default to disable selinux for .10 release + EnableSelinuxSupport: job.GetenvBool("SelinuxEnabled"), } if dns := job.GetenvList("Dns"); dns != nil { config.Dns = dns diff --git a/docker/docker.go b/docker/docker.go index 4d90ab8b2e..ce3c54dacd 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -64,6 +64,7 @@ func main() { flCa = flag.String([]string{"-tlscacert"}, dockerConfDir+defaultCaFile, "Trust only remotes providing a certificate signed by the CA given here") flCert = flag.String([]string{"-tlscert"}, dockerConfDir+defaultCertFile, "Path to TLS certificate file") flKey = flag.String([]string{"-tlskey"}, dockerConfDir+defaultKeyFile, "Path to TLS key file") + flSelinuxEnabled = flag.Bool([]string{"-selinux-enabled"}, false, "Enable selinux support") ) flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers") flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") @@ -148,6 +149,7 @@ func main() { job.Setenv("GraphDriver", *flGraphDriver) job.Setenv("ExecDriver", *flExecDriver) job.SetenvInt("Mtu", *flMtu) + job.SetenvBool("SelinuxEnabled", *flSelinuxEnabled) if err := job.Run(); err != nil { log.Fatal(err) } diff --git a/graph/graph.go b/graph/graph.go index 5c3a94bab7..b889139121 100644 --- a/graph/graph.go +++ b/graph/graph.go @@ -98,7 +98,7 @@ func (graph *Graph) Get(name string) (*image.Image, error) { img.SetGraph(graph) if img.Size < 0 { - rootfs, err := graph.driver.Get(img.ID) + rootfs, err := graph.driver.Get(img.ID, "") if err != nil { return nil, fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err) } @@ -110,7 +110,7 @@ func (graph *Graph) Get(name string) (*image.Image, error) { return nil, err } } else { - parentFs, err := graph.driver.Get(img.Parent) + parentFs, err := graph.driver.Get(img.Parent, "") if err != nil { return nil, err } @@ -191,11 +191,11 @@ 
func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, i } // Create root filesystem in the driver - if err := graph.driver.Create(img.ID, img.Parent, ""); err != nil { + if err := graph.driver.Create(img.ID, img.Parent); err != nil { return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err) } // Mount the root filesystem so we can apply the diff/layer - rootfs, err := graph.driver.Get(img.ID) + rootfs, err := graph.driver.Get(img.ID, "") if err != nil { return fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err) } diff --git a/image/image.go b/image/image.go index 239e5cc055..b56cbf08ee 100644 --- a/image/image.go +++ b/image/image.go @@ -98,7 +98,7 @@ func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, ro return err } } else { - parent, err := driver.Get(img.Parent) + parent, err := driver.Get(img.Parent, "") if err != nil { return err } @@ -159,7 +159,7 @@ func (img *Image) TarLayer() (arch archive.Archive, err error) { return differ.Diff(img.ID) } - imgFs, err := driver.Get(img.ID) + imgFs, err := driver.Get(img.ID, "") if err != nil { return nil, err } @@ -182,7 +182,7 @@ func (img *Image) TarLayer() (arch archive.Archive, err error) { }), nil } - parentFs, err := driver.Get(img.Parent) + parentFs, err := driver.Get(img.Parent, "") if err != nil { return nil, err } diff --git a/integration/graph_test.go b/integration/graph_test.go index a7a8137284..c29055edfc 100644 --- a/integration/graph_test.go +++ b/integration/graph_test.go @@ -43,7 +43,7 @@ func TestMount(t *testing.T) { t.Fatal(err) } - if _, err := driver.Get(image.ID); err != nil { + if _, err := driver.Get(image.ID, ""); err != nil { t.Fatal(err) } } diff --git a/integration/runtime_test.go b/integration/runtime_test.go index bf00437547..c84ea5bed2 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -874,12 +874,12 @@ func TestDestroyWithInitLayer(t *testing.T) { driver := daemon.Graph().Driver() // Make sure that the container does not exist in the driver - if _, err := driver.Get(container.ID); err == nil { + if _, err := driver.Get(container.ID, ""); err == nil { t.Fatal("Conttainer should not exist in the driver") } // Make sure that the init layer is removed from the driver - if _, err := driver.Get(fmt.Sprintf("%s-init", container.ID)); err == nil { + if _, err := driver.Get(fmt.Sprintf("%s-init", container.ID), ""); err == nil { t.Fatal("Container's init layer should not exist in the driver") } } diff --git a/pkg/label/label_selinux.go b/pkg/label/label_selinux.go index 9f7463f79b..2f67ee458f 100644 --- a/pkg/label/label_selinux.go +++ b/pkg/label/label_selinux.go @@ -32,13 +32,13 @@ func GenLabels(options string) (string, string, error) { return processLabel, mountLabel, err } -func FormatMountLabel(src string, mountLabel string) string { - if selinux.SelinuxEnabled() && mountLabel != "" { +func FormatMountLabel(src, mountLabel string) string { + if mountLabel != "" { switch src { case "": - src = fmt.Sprintf("%s,context=%s", src, mountLabel) + src = fmt.Sprintf("context=%q", mountLabel) default: - src = fmt.Sprintf("context=%s", mountLabel) + src = fmt.Sprintf("%s,context=%q", src, mountLabel) } } return src diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 4e50bc513b..36c8cd1245 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -75,8 +75,9 @@ func (ns *linuxNs) Init(container *libcontainer.Container, 
uncleanRootfs, consol } } runtime.LockOSThread() + if err := label.SetProcessLabel(container.Context["process_label"]); err != nil { - return fmt.Errorf("SetProcessLabel label %s", err) + return fmt.Errorf("set process label %s", err) } ns.logger.Printf("execing %s\n", args[0]) return system.Execv(args[0], args[0:], container.Env) From b7942ec2ca7c7568df0c3b7eb554b05e2c3a3081 Mon Sep 17 00:00:00 2001 From: Dan Walsh Date: Mon, 21 Apr 2014 17:09:26 -0400 Subject: [PATCH 092/219] This patch reworks the SELinux patch to be only run on demand by the daemon Added --selinux-enable switch to daemon to enable SELinux labeling. The daemon will now generate a new unique random SELinux label when a container starts, and remove it when the container is removed. The MCS labels will be stored in the daemon memory. The labels of containers will be stored in the container.json file. When the daemon restarts on boot or if done by an admin, it will read all containers json files and reserve the MCS labels. A potential problem would be conflicts if you setup thousands of containers, current scheme would handle ~500,000 containers. Docker-DCO-1.1-Signed-off-by: Dan Walsh (github: rhatdan) Docker-DCO-1.1-Signed-off-by: Dan Walsh (github: crosbymichael) --- daemon/container.go | 18 +++++++++++------- daemon/daemon.go | 4 +++- daemonconfig/config.go | 2 +- docker/docker.go | 2 +- pkg/label/label.go | 4 ++++ pkg/label/label_selinux.go | 4 ++++ pkg/selinux/selinux.go | 28 ++++++++++++++++++++++++---- pkg/selinux/selinux_test.go | 2 ++ 8 files changed, 50 insertions(+), 14 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index 4416a4c212..2190869da0 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -11,7 +11,7 @@ import ( "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/links" "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/pkg/selinux" + "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" @@ -66,7 +66,7 @@ type Container struct { stdinPipe io.WriteCloser daemon *Daemon - mountLabel, processLabel string + MountLabel, ProcessLabel string waitLock chan struct{} Volumes map[string]string @@ -124,6 +124,7 @@ func (container *Container) FromDisk() error { if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") { return err } + label.ReserveLabel(container.ProcessLabel) return container.readHostConfig() } @@ -325,8 +326,8 @@ func populateCommand(c *Container, env []string) { en *execdriver.Network context = make(map[string][]string) ) - context["process_label"] = []string{c.processLabel} - context["mount_label"] = []string{c.mountLabel} + context["process_label"] = []string{c.ProcessLabel} + context["mount_label"] = []string{c.MountLabel} en = &execdriver.Network{ Mtu: c.daemon.config.Mtu, @@ -388,10 +389,13 @@ func (container *Container) Start() (err error) { return err } - process, mount := selinux.GetLxcContexts() + process, mount, err := label.GenLabels("") + if err != nil { + return err + } - container.mountLabel = mount - container.processLabel = process + container.MountLabel = mount + container.ProcessLabel = process if err := container.Mount(); err != nil { return err diff --git a/daemon/daemon.go b/daemon/daemon.go index 50707fbc98..2aac521072 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -289,6 +289,8 @@ func (daemon *Daemon) Destroy(container *Container) error { if err := os.RemoveAll(container.root); err != nil { return 
fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) } + selinux.FreeLxcContexts(container.ProcessLabel) + return nil } @@ -839,7 +841,7 @@ func (daemon *Daemon) Close() error { } func (daemon *Daemon) Mount(container *Container) error { - dir, err := daemon.driver.Get(container.ID, container.mountLabel) + dir, err := daemon.driver.Get(container.ID, container.MountLabel) if err != nil { return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err) } diff --git a/daemonconfig/config.go b/daemonconfig/config.go index 87ac52224d..619bfe582f 100644 --- a/daemonconfig/config.go +++ b/daemonconfig/config.go @@ -47,7 +47,7 @@ func ConfigFromJob(job *engine.Job) *Config { InterContainerCommunication: job.GetenvBool("InterContainerCommunication"), GraphDriver: job.Getenv("GraphDriver"), ExecDriver: job.Getenv("ExecDriver"), - EnableSelinuxSupport: job.GetenvBool("SelinuxEnabled"), + EnableSelinuxSupport: job.GetenvBool("EnableSelinuxSupport"), } if dns := job.GetenvList("Dns"); dns != nil { config.Dns = dns diff --git a/docker/docker.go b/docker/docker.go index ce3c54dacd..7c366001b7 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -149,7 +149,7 @@ func main() { job.Setenv("GraphDriver", *flGraphDriver) job.Setenv("ExecDriver", *flExecDriver) job.SetenvInt("Mtu", *flMtu) - job.SetenvBool("SelinuxEnabled", *flSelinuxEnabled) + job.SetenvBool("EnableSelinuxSupport", *flSelinuxEnabled) if err := job.Run(); err != nil { log.Fatal(err) } diff --git a/pkg/label/label.go b/pkg/label/label.go index 38f026bc5a..434e1c5725 100644 --- a/pkg/label/label.go +++ b/pkg/label/label.go @@ -24,3 +24,7 @@ func GetPidCon(pid int) (string, error) { func Init() { } + +func ReserveLabel(label string) error { + return nil +} diff --git a/pkg/label/label_selinux.go b/pkg/label/label_selinux.go index 2f67ee458f..9361a7142c 100644 --- a/pkg/label/label_selinux.go +++ b/pkg/label/label_selinux.go @@ -75,3 +75,7 @@ func GetPidCon(pid int) (string, error) { func Init() { selinux.SelinuxEnabled() } + +func ReserveLabel(label string) { + selinux.ReserveLabel(label) +} diff --git a/pkg/selinux/selinux.go b/pkg/selinux/selinux.go index edabc4f7dd..422c39babd 100644 --- a/pkg/selinux/selinux.go +++ b/pkg/selinux/selinux.go @@ -204,6 +204,13 @@ func NewContext(scon string) SELinuxContext { return c } +func ReserveLabel(scon string) { + if len(scon) != 0 { + con := strings.SplitN(scon, ":", 4) + mcsAdd(con[3]) + } +} + func SelinuxGetEnforce() int { var enforce int @@ -229,8 +236,12 @@ func SelinuxGetEnforceMode() int { return Disabled } -func mcsAdd(mcs string) { +func mcsAdd(mcs string) error { + if mcsList[mcs] { + return fmt.Errorf("MCS Label already exists") + } mcsList[mcs] = true + return nil } func mcsDelete(mcs string) { @@ -283,15 +294,21 @@ func uniqMcs(catRange uint32) string { } } mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2) - if mcsExists(mcs) { + if err := mcsAdd(mcs); err != nil { continue } - mcsAdd(mcs) break } return mcs } +func FreeLxcContexts(scon string) { + if len(scon) != 0 { + con := strings.SplitN(scon, ":", 4) + mcsDelete(con[3]) + } +} + func GetLxcContexts() (processLabel string, fileLabel string) { var ( val, key string @@ -344,7 +361,8 @@ func GetLxcContexts() (processLabel string, fileLabel string) { } exit: - mcs := IntToMcs(os.Getpid(), 1024) + // mcs := IntToMcs(os.Getpid(), 1024) + mcs := uniqMcs(1024) scon := NewContext(processLabel) scon["level"] = mcs processLabel = scon.Get() @@ -373,6 +391,8 @@ func CopyLevel(src, dest string) (string, 
error) { } scon := NewContext(src) tcon := NewContext(dest) + mcsDelete(tcon["level"]) + mcsAdd(scon["level"]) tcon["level"] = scon["level"] return tcon.Get(), nil } diff --git a/pkg/selinux/selinux_test.go b/pkg/selinux/selinux_test.go index fde6ab147d..9a3a5525e4 100644 --- a/pkg/selinux/selinux_test.go +++ b/pkg/selinux/selinux_test.go @@ -31,9 +31,11 @@ func TestSELinux(t *testing.T) { plabel, flabel = selinux.GetLxcContexts() t.Log(plabel) t.Log(flabel) + selinux.FreeLxcContexts(plabel) plabel, flabel = selinux.GetLxcContexts() t.Log(plabel) t.Log(flabel) + selinux.FreeLxcContexts(plabel) t.Log("getenforce ", selinux.SelinuxGetEnforce()) t.Log("getenforcemode ", selinux.SelinuxGetEnforceMode()) pid := os.Getpid() From 12934ef3a40d814cb307dfea0cc86124ec997593 Mon Sep 17 00:00:00 2001 From: Dan Walsh Date: Fri, 25 Apr 2014 14:34:42 -0400 Subject: [PATCH 093/219] Fix SELinux errors caused by multi-threading Occasionally the selinux_test program will fail because we are setting file context based on the Process ID but not the TID. THis change will always use the TID to set SELinux labels. Docker-DCO-1.1-Signed-off-by: Daniel Walsh (github: rhatdan) Docker-DCO-1.1-Signed-off-by: Dan Walsh (github: crosbymichael) --- pkg/selinux/selinux.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/selinux/selinux.go b/pkg/selinux/selinux.go index 422c39babd..6cf7bd7104 100644 --- a/pkg/selinux/selinux.go +++ b/pkg/selinux/selinux.go @@ -146,15 +146,15 @@ func Setfilecon(path string, scon string) error { } func Setfscreatecon(scon string) error { - return writeCon("/proc/self/attr/fscreate", scon) + return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", system.Gettid()), scon) } func Getfscreatecon() (string, error) { - return readCon("/proc/self/attr/fscreate") + return readCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", system.Gettid())) } func getcon() (string, error) { - return readCon("/proc/self/attr/current") + return readCon(fmt.Sprintf("/proc/self/task/%d/attr/current", system.Gettid())) } func Getpidcon(pid int) (string, error) { From ae006493054e524ed35c08863f1713986fe0a22c Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 28 Apr 2014 14:17:31 -0700 Subject: [PATCH 094/219] Update devicemapper to pass mount flag Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/graphdriver/devmapper/driver.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go index 558feef327..9f240d96e0 100644 --- a/daemon/graphdriver/devmapper/driver.go +++ b/daemon/graphdriver/devmapper/driver.go @@ -4,11 +4,12 @@ package devmapper import ( "fmt" - "github.com/dotcloud/docker/daemon/graphdriver" - "github.com/dotcloud/docker/utils" "io/ioutil" "os" "path" + + "github.com/dotcloud/docker/daemon/graphdriver" + "github.com/dotcloud/docker/utils" ) func init() { @@ -98,7 +99,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { } // Mount the device - if err := d.DeviceSet.MountDevice(id, mp, ""); err != nil { + if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { return "", err } From 46e05ed2d96efca9bdb466d20138fde1994769ba Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 28 Apr 2014 14:36:04 -0700 Subject: [PATCH 095/219] Update process labels to be set at create not start Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/container.go | 32 ++++++++++------------- daemon/daemon.go | 
25 +++++++++++------- daemon/graphdriver/devmapper/deviceset.go | 9 +++---- pkg/label/label_selinux.go | 6 +++-- 4 files changed, 37 insertions(+), 35 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index 2190869da0..17eaac7323 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -4,6 +4,16 @@ import ( "encoding/json" "errors" "fmt" + "io" + "io/ioutil" + "log" + "os" + "path" + "strings" + "sync" + "syscall" + "time" + "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/daemon/execdriver" "github.com/dotcloud/docker/daemon/graphdriver" @@ -14,15 +24,6 @@ import ( "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "log" - "os" - "path" - "strings" - "sync" - "syscall" - "time" ) const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" @@ -124,7 +125,10 @@ func (container *Container) FromDisk() error { if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") { return err } - label.ReserveLabel(container.ProcessLabel) + + if err := label.ReserveLabel(container.ProcessLabel); err != nil { + return err + } return container.readHostConfig() } @@ -389,14 +393,6 @@ func (container *Container) Start() (err error) { return err } - process, mount, err := label.GenLabels("") - if err != nil { - return err - } - - container.MountLabel = mount - container.ProcessLabel = process - if err := container.Mount(); err != nil { return err } diff --git a/daemon/daemon.go b/daemon/daemon.go index 2aac521072..cdd1bb915f 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -3,6 +3,16 @@ package daemon import ( "container/list" "fmt" + "io" + "io/ioutil" + "log" + "os" + "path" + "regexp" + "strings" + "sync" + "time" + "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/daemon/execdriver" "github.com/dotcloud/docker/daemon/execdriver/execdrivers" @@ -17,20 +27,12 @@ import ( "github.com/dotcloud/docker/graph" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/pkg/graphdb" + "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/mount" "github.com/dotcloud/docker/pkg/selinux" "github.com/dotcloud/docker/pkg/sysinfo" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "log" - "os" - "path" - "regexp" - "strings" - "sync" - "time" ) // Set the max depth to the aufs default that most @@ -535,6 +537,11 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *i ExecDriver: daemon.execDriver.Name(), } container.root = daemon.containerRoot(container.ID) + + if container.MountLabel, container.ProcessLabel, err = label.GenLabels(""); err != nil { + return nil, err + } + return container, nil } diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index a562210e55..a96331d812 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -6,8 +6,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/dotcloud/docker/pkg/label" - "github.com/dotcloud/docker/utils" "io" "io/ioutil" "path" @@ -17,6 +15,9 @@ import ( "sync" "syscall" "time" + + "github.com/dotcloud/docker/pkg/label" + "github.com/dotcloud/docker/utils" ) var ( @@ -858,7 +859,6 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { defer devices.Unlock() if info.mountCount > 0 { - fmt.Printf("---> already mounted\n") if path != 
info.mountPath { return fmt.Errorf("Trying to mount devmapper device in multple places (%s, %s)", info.mountPath, path) } @@ -874,12 +874,9 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { var flags uintptr = sysMsMgcVal mountOptions := label.FormatMountLabel("discard", mountLabel) - fmt.Printf("-----> setting mount label %s\n", mountOptions) - err = sysMount(info.DevName(), path, "ext4", flags, mountOptions) if err != nil && err == sysEInval { mountOptions = label.FormatMountLabel("", mountLabel) - fmt.Printf("-----> setting mount label after error %s\n", mountOptions) err = sysMount(info.DevName(), path, "ext4", flags, mountOptions) } if err != nil { diff --git a/pkg/label/label_selinux.go b/pkg/label/label_selinux.go index 9361a7142c..926f7fffa8 100644 --- a/pkg/label/label_selinux.go +++ b/pkg/label/label_selinux.go @@ -4,8 +4,9 @@ package label import ( "fmt" - "github.com/dotcloud/docker/pkg/selinux" "strings" + + "github.com/dotcloud/docker/pkg/selinux" ) func GenLabels(options string) (string, string, error) { @@ -76,6 +77,7 @@ func Init() { selinux.SelinuxEnabled() } -func ReserveLabel(label string) { +func ReserveLabel(label string) error { selinux.ReserveLabel(label) + return nil } From 64d0f7e39b395a3fc52f441a53f188a19bd53cf3 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 29 Apr 2014 00:47:09 -0700 Subject: [PATCH 096/219] Add cli flag to docs for selinux support Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- docs/sources/reference/commandline/cli.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index ac589c01b2..75a5be33b6 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -60,6 +60,7 @@ expect an integer, and they can only be specified once. 
-d, --daemon=false: Enable daemon mode --dns=[]: Force docker to use specific DNS servers --dns-search=[]: Force Docker to use specific DNS search domains + --enable-selinux=false: Enable selinux support for running containers -g, --graph="/var/lib/docker": Path to use as the root of the docker runtime --icc=true: Enable inter-container communication --ip="0.0.0.0": Default IP address to use when binding container ports From 1a5ffef6c6ea8c5cc31c298bbdf6f7a29c60fbb8 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 29 Apr 2014 01:08:19 -0700 Subject: [PATCH 097/219] Do not return labels when in privileged mode Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/container.go | 21 ++++++++++++++++++--- daemon/daemon.go | 5 ++--- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index 17eaac7323..5e4b72bf12 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -330,8 +330,8 @@ func populateCommand(c *Container, env []string) { en *execdriver.Network context = make(map[string][]string) ) - context["process_label"] = []string{c.ProcessLabel} - context["mount_label"] = []string{c.MountLabel} + context["process_label"] = []string{c.GetProcessLabel()} + context["mount_label"] = []string{c.GetMountLabel()} en = &execdriver.Network{ Mtu: c.daemon.config.Mtu, @@ -392,7 +392,6 @@ func (container *Container) Start() (err error) { if err := container.setupContainerDns(); err != nil { return err } - if err := container.Mount(); err != nil { return err } @@ -1192,3 +1191,19 @@ func (container *Container) allocatePort(eng *engine.Engine, port nat.Port, bind bindings[port] = binding return nil } + +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.hostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +func (container *Container) GetMountLabel() string { + if container.hostConfig.Privileged { + return "" + } + return container.MountLabel +} diff --git a/daemon/daemon.go b/daemon/daemon.go index cdd1bb915f..64a53989d0 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -538,10 +538,9 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *i } container.root = daemon.containerRoot(container.ID) - if container.MountLabel, container.ProcessLabel, err = label.GenLabels(""); err != nil { + if container.ProcessLabel, container.MountLabel, err = label.GenLabels(""); err != nil { return nil, err } - return container, nil } @@ -848,7 +847,7 @@ func (daemon *Daemon) Close() error { } func (daemon *Daemon) Mount(container *Container) error { - dir, err := daemon.driver.Get(container.ID, container.MountLabel) + dir, err := daemon.driver.Get(container.ID, container.GetMountLabel()) if err != nil { return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err) } From 0c7143b32386c62cccd529de69abf88df938757d Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 29 Apr 2014 03:41:44 -0700 Subject: [PATCH 098/219] Add mountlabel to dev Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/mount/init.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/pkg/libcontainer/mount/init.go b/pkg/libcontainer/mount/init.go index 06b2c82f56..735970cded 100644 --- a/pkg/libcontainer/mount/init.go +++ b/pkg/libcontainer/mount/init.go @@ -4,14 +4,15 @@ package mount import ( "fmt" 
+ "os" + "path/filepath" + "syscall" + "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/mount/nodes" "github.com/dotcloud/docker/pkg/libcontainer/security/restrict" "github.com/dotcloud/docker/pkg/system" - "os" - "path/filepath" - "syscall" ) // default mount point flags @@ -130,11 +131,12 @@ func newSystemMounts(rootfs, mountLabel string, mounts libcontainer.Mounts) []mo } if len(mounts.OfType("devtmpfs")) == 1 { - systemMounts = append(systemMounts, mount{source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: "mode=755"}) + systemMounts = append(systemMounts, mount{source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)}) } systemMounts = append(systemMounts, mount{source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)}, - mount{source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}) + mount{source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, + ) if len(mounts.OfType("sysfs")) == 1 { systemMounts = append(systemMounts, mount{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags}) From 070747a21365959d3179d8df627d3e614318e202 Mon Sep 17 00:00:00 2001 From: Rohit Jnagal Date: Tue, 29 Apr 2014 18:59:20 +0000 Subject: [PATCH 099/219] Cleanup existing controllers when cleanup fails mid-way. 
Docker-DCO-1.1-Signed-off-by: Rohit Jnagal (github: rjnagal) --- pkg/cgroups/fs/apply_raw.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/cgroups/fs/apply_raw.go b/pkg/cgroups/fs/apply_raw.go index 60f318e9ef..5f9fc826b3 100644 --- a/pkg/cgroups/fs/apply_raw.go +++ b/pkg/cgroups/fs/apply_raw.go @@ -67,6 +67,7 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { } for _, sys := range subsystems { if err := sys.Set(d); err != nil { + d.Cleanup() return nil, err } } From 9651ff46bfc19b815e3f78bc5d6895a352db2d37 Mon Sep 17 00:00:00 2001 From: Sam Rijs Date: Tue, 29 Apr 2014 21:06:48 +0200 Subject: [PATCH 100/219] docs: DisableNetwork -> NetworkDisabled Docker-DCO-1.1-Signed-off-by: Samuel Reis (github: srijs) --- docs/sources/reference/api/docker_remote_api_v1.10.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md index 2c42e19798..bbf3592cfc 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.md +++ b/docs/sources/reference/api/docker_remote_api_v1.10.md @@ -129,7 +129,7 @@ Create a container "/tmp": {} }, "WorkingDir":"", - "DisableNetwork": false, + "NetworkDisabled": false, "ExposedPorts":{ "22/tcp": {} } @@ -1153,7 +1153,7 @@ Create a new image from a container's changes "/tmp": {} }, "WorkingDir":"", - "DisableNetwork": false, + "NetworkDisabled": false, "ExposedPorts":{ "22/tcp": {} } From 83a5f2a192f4c8bb87e9cbd80df42a0496615a1a Mon Sep 17 00:00:00 2001 From: amangoel Date: Tue, 29 Apr 2014 13:49:36 -0700 Subject: [PATCH 101/219] Update working-with-docker.md --- docs/sources/introduction/working-with-docker.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/introduction/working-with-docker.md b/docs/sources/introduction/working-with-docker.md index 17ed7ff761..e76c80cffa 100644 --- a/docs/sources/introduction/working-with-docker.md +++ b/docs/sources/introduction/working-with-docker.md @@ -236,7 +236,7 @@ the container will also stop. ### Listing containers We can see a list of all the containers on our host using the `docker -ps` command. By default the `docker ps` commands only shows running +ps` command. By default the `docker ps` command only shows running containers. But we can also add the `-a` flag to show *all* containers - both running and stopped. From 6e05c420c9b7e4660b391643be21720d6f80e18d Mon Sep 17 00:00:00 2001 From: Victor Marmol Date: Tue, 29 Apr 2014 20:03:33 +0000 Subject: [PATCH 102/219] Add a TESTDIRS variable to the test bundle to allow for the running of a single go directory worth of tests. 
Docker-DCO-1.1-Signed-off-by: Victor Marmol (github: vmarmol) --- Makefile | 2 +- hack/make/test | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index e020c14eac..5945dc2737 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)") -DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" +DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS -e TESTDIRS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" # to allow `make DOCSDIR=docs docs-shell` DOCKER_RUN_DOCS := docker run --rm -it $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) -e AWS_S3_BUCKET diff --git a/hack/make/test b/hack/make/test index 183ce95c24..828fdf6efd 100644 --- a/hack/make/test +++ b/hack/make/test @@ -17,8 +17,13 @@ bundle_test() { { date + # Run all the tests if no TESTDIRS were specified. + if [ -z "$TESTDIRS" ]; then + TESTDIRS=$(find_dirs '*_test.go') + fi + TESTS_FAILED=() - for test_dir in $(find_dirs '*_test.go'); do + for test_dir in $TESTDIRS; do echo if ! LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER" go_test_dir "$test_dir"; then From d5d62ff95574a48816890d8d6e0785a79f559c3c Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Mon, 28 Apr 2014 23:22:54 -0600 Subject: [PATCH 103/219] Close extraneous file descriptors in containers Without this patch, containers inherit the open file descriptors of the daemon, so my "exec 42>&2" allows us to "echo >&42 some nasty error with some bad advice" directly into the daemon log. :) Also, "hack/dind" was already doing this due to issues caused by the inheritance, so I'm removing that hack too since this patch obsoletes it by generalizing it for all containers. Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- daemon/execdriver/lxc/driver.go | 5 ++++ hack/dind | 16 ----------- hack/make/test-integration-cli | 3 ++ integration-cli/docker_cli_run_test.go | 16 +++++++++++ pkg/libcontainer/nsinit/init.go | 8 ++++-- pkg/system/fds_linux.go | 38 ++++++++++++++++++++++++++ pkg/system/fds_unsupported.go | 12 ++++++++ 7 files changed, 80 insertions(+), 18 deletions(-) create mode 100644 pkg/system/fds_linux.go create mode 100644 pkg/system/fds_unsupported.go diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index 1232d608a3..6ee7f3c1dd 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -5,6 +5,7 @@ import ( "github.com/dotcloud/docker/daemon/execdriver" "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/label" + "github.com/dotcloud/docker/pkg/system" "github.com/dotcloud/docker/utils" "io/ioutil" "log" @@ -42,6 +43,10 @@ func init() { return err } + if err := system.CloseFdsFrom(3); err != nil { + return err + } + if err := changeUser(args); err != nil { return err } diff --git a/hack/dind b/hack/dind index 94147f5324..e3641a342f 100755 --- a/hack/dind +++ b/hack/dind @@ -70,22 +70,6 @@ grep -q :devices: /proc/1/cgroup || grep -qw devices /proc/1/cgroup || echo "WARNING: it looks like the 'devices' cgroup is not mounted." -# Now, close extraneous file descriptors. 
-pushd /proc/self/fd >/dev/null -for FD in * -do - case "$FD" in - # Keep stdin/stdout/stderr - [012]) - ;; - # Nuke everything else - *) - eval exec "$FD>&-" - ;; - esac -done -popd >/dev/null - # Mount /tmp mount -t tmpfs none /tmp diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli index 92d1373f59..7e8e82f34f 100644 --- a/hack/make/test-integration-cli +++ b/hack/make/test-integration-cli @@ -19,6 +19,9 @@ bundle_test_integration_cli() { false fi + # intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers + exec 41>&1 42>&2 + ( set -x; exec \ docker --daemon --debug \ --storage-driver "$DOCKER_GRAPHDRIVER" \ diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 5973f2fe1b..76ae226b8d 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -91,6 +91,22 @@ func TestDockerRunEchoNamedContainer(t *testing.T) { logDone("run - echo with named container") } +// docker run should not leak file descriptors +func TestDockerRunLeakyFileDescriptors(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "busybox", "ls", "-C", "/proc/self/fd") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + // normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory + if out != "0 1 2 3\n" { + t.Errorf("container should've printed '0 1 2 3', not: %s", out) + } + + deleteAllContainers() + + logDone("run - check file descriptor leakage") +} + // it should be possible to ping Google DNS resolver // this will fail when Internet access is unavailable func TestDockerRunPingGoogle(t *testing.T) { diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 67095fdba1..d6b40f34fe 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -130,12 +130,16 @@ func setupNetwork(container *libcontainer.Container, context libcontainer.Contex return nil } -// finalizeNamespace drops the caps and sets the correct user -// and working dir before execing the command inside the namespace +// finalizeNamespace drops the caps, sets the correct user +// and working dir, and closes any leaky file descriptors +// before execing the command inside the namespace func finalizeNamespace(container *libcontainer.Container) error { if err := capabilities.DropCapabilities(container); err != nil { return fmt.Errorf("drop capabilities %s", err) } + if err := system.CloseFdsFrom(3); err != nil { + return fmt.Errorf("close open file descriptors %s", err) + } if err := setupUser(container); err != nil { return fmt.Errorf("setup user %s", err) } diff --git a/pkg/system/fds_linux.go b/pkg/system/fds_linux.go new file mode 100644 index 0000000000..53d2299d3e --- /dev/null +++ b/pkg/system/fds_linux.go @@ -0,0 +1,38 @@ +package system + +import ( + "io/ioutil" + "strconv" + "syscall" +) + +// Works similarly to OpenBSD's "closefrom(2)": +// The closefrom() call deletes all descriptors numbered fd and higher from +// the per-process file descriptor table. It is effectively the same as +// calling close(2) on each descriptor. 
+// http://www.openbsd.org/cgi-bin/man.cgi?query=closefrom&sektion=2 +// +// See also http://stackoverflow.com/a/918469/433558 +func CloseFdsFrom(minFd int) error { + fdList, err := ioutil.ReadDir("/proc/self/fd") + if err != nil { + return err + } + for _, fi := range fdList { + fd, err := strconv.Atoi(fi.Name()) + if err != nil { + // ignore non-numeric file names + continue + } + + if fd < minFd { + // ignore descriptors lower than our specified minimum + continue + } + + // intentionally ignore errors from syscall.Close + syscall.Close(fd) + // the cases where this might fail are basically file descriptors that have already been closed (including and especially the one that was created when ioutil.ReadDir did the "opendir" syscall) + } + return nil +} diff --git a/pkg/system/fds_unsupported.go b/pkg/system/fds_unsupported.go new file mode 100644 index 0000000000..c1e08e82d3 --- /dev/null +++ b/pkg/system/fds_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package system + +import ( + "fmt" + "runtime" +) + +func CloseFdsFrom(minFd int) error { + return fmt.Errorf("CloseFdsFrom is unsupported on this platform (%s/%s)", runtime.GOOS, runtime.GOARCH) +} From a39f3c9200d574c12b88fb91654f1242ec07b7b5 Mon Sep 17 00:00:00 2001 From: Victor Marmol Date: Tue, 29 Apr 2014 22:49:03 +0000 Subject: [PATCH 104/219] Add new test-unit make rule which only runs the unit tests. Renames test bundle to test-unit. Docker-DCO-1.1-Signed-off-by: Victor Marmol (github: vmarmol) --- Makefile | 7 +++++-- hack/make.sh | 2 +- hack/make/{test => test-unit} | 6 +++--- hack/release.sh | 2 +- 4 files changed, 10 insertions(+), 7 deletions(-) rename hack/make/{test => test-unit} (92%) diff --git a/Makefile b/Makefile index 5945dc2737..a4c8658e08 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration test-integration-cli validate +.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate # to allow `make BINDDIR=. shell` or `make BINDDIR= test` BINDDIR := bundles @@ -35,7 +35,10 @@ docs-release: docs-build $(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./release.sh test: build - $(DOCKER_RUN_DOCKER) hack/make.sh binary test test-integration test-integration-cli + $(DOCKER_RUN_DOCKER) hack/make.sh binary test-unit test-integration test-integration-cli + +test-unit: build + $(DOCKER_RUN_DOCKER) hack/make.sh test-unit test-integration: build $(DOCKER_RUN_DOCKER) hack/make.sh test-integration diff --git a/hack/make.sh b/hack/make.sh index 46df398c57..8636756c87 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -45,7 +45,7 @@ DEFAULT_BUNDLES=( binary - test + test-unit test-integration test-integration-cli diff --git a/hack/make/test b/hack/make/test-unit similarity index 92% rename from hack/make/test rename to hack/make/test-unit index 828fdf6efd..066865859c 100644 --- a/hack/make/test +++ b/hack/make/test-unit @@ -11,9 +11,9 @@ TEXTRESET=$'\033[0m' # reset the foreground colour # If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. # You can use this to select certain tests to run, eg. 
# -# TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test +# TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test-unit # -bundle_test() { +bundle_test_unit() { { date @@ -52,4 +52,4 @@ bundle_test() { } 2>&1 | tee $DEST/test.log } -bundle_test +bundle_test_unit diff --git a/hack/release.sh b/hack/release.sh index d77d454e27..8642a4edb9 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -54,7 +54,7 @@ RELEASE_BUNDLES=( if [ "$1" != '--release-regardless-of-test-failure' ]; then RELEASE_BUNDLES=( - test test-integration + test-unit test-integration "${RELEASE_BUNDLES[@]}" test-integration-cli ) From ebaff50bd2c94f2353f4bd738e077c7baef3e879 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Wed, 30 Apr 2014 10:12:21 +1000 Subject: [PATCH 105/219] for want of a comma, the kingdom was lost Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/s3_website.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/s3_website.json b/docs/s3_website.json index 89e3ecd3cf..fb14628ce6 100644 --- a/docs/s3_website.json +++ b/docs/s3_website.json @@ -9,9 +9,9 @@ { "Condition": { "KeyPrefixEquals": "en/latest/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } }, { "Condition": { "KeyPrefixEquals": "en/master/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } }, { "Condition": { "KeyPrefixEquals": "en/v0.6.3/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } }, - { "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "ReplaceKeyPrefixWith": "jsearch/" } } + { "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "ReplaceKeyPrefixWith": "jsearch/" } }, { "Condition": { "KeyPrefixEquals": "index/" }, "Redirect": { "ReplaceKeyPrefixWith": "docker-io/" } }, - { "Condition": { "KeyPrefixEquals": "reference/api/index_api/" }, "Redirect": { "ReplaceKeyPrefixWith": "reference/api/docker-io_api/" } }, + { "Condition": { "KeyPrefixEquals": "reference/api/index_api/" }, "Redirect": { "ReplaceKeyPrefixWith": "reference/api/docker-io_api/" } } ] } From a1a9baf926ff8ec2bd7ba0dd39cf3a9eb5fab1d3 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Tue, 29 Apr 2014 23:19:21 -0600 Subject: [PATCH 106/219] Update pkg/apparmor to provide a better error message when apparmor_parser cannot be found Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- pkg/apparmor/setup.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/pkg/apparmor/setup.go b/pkg/apparmor/setup.go index 2401f63414..ef6333a01a 100644 --- a/pkg/apparmor/setup.go +++ b/pkg/apparmor/setup.go @@ -69,15 +69,8 @@ func InstallDefaultProfile(backupPath string) error { cmd.Dir = "/etc/apparmor.d" output, err := cmd.CombinedOutput() - if err != nil && !os.IsNotExist(err) { - if e, ok := err.(*exec.Error); ok { - // keeping with the current profile load code, if the parser does not - // exist then just return - if e.Err == exec.ErrNotFound || os.IsNotExist(e.Err) { - return nil - } - } - return fmt.Errorf("Error loading docker profile: %s (%s)", err, output) + if err != nil { + return fmt.Errorf("Error loading docker apparmor profile: %s (%s)", err, output) } return nil } From 9e2e26c69a52c0b55d31de477a652731f87b4558 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Wed, 30 Apr 2014 10:12:21 +1000 Subject: [PATCH 107/219] for want of a comma, the kingdom was lost Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/s3_website.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/s3_website.json b/docs/s3_website.json 
index 89e3ecd3cf..fb14628ce6 100644 --- a/docs/s3_website.json +++ b/docs/s3_website.json @@ -9,9 +9,9 @@ { "Condition": { "KeyPrefixEquals": "en/latest/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } }, { "Condition": { "KeyPrefixEquals": "en/master/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } }, { "Condition": { "KeyPrefixEquals": "en/v0.6.3/" }, "Redirect": { "ReplaceKeyPrefixWith": "" } }, - { "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "ReplaceKeyPrefixWith": "jsearch/" } } + { "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "ReplaceKeyPrefixWith": "jsearch/" } }, { "Condition": { "KeyPrefixEquals": "index/" }, "Redirect": { "ReplaceKeyPrefixWith": "docker-io/" } }, - { "Condition": { "KeyPrefixEquals": "reference/api/index_api/" }, "Redirect": { "ReplaceKeyPrefixWith": "reference/api/docker-io_api/" } }, + { "Condition": { "KeyPrefixEquals": "reference/api/index_api/" }, "Redirect": { "ReplaceKeyPrefixWith": "reference/api/docker-io_api/" } } ] } From 494c789ac34d3f22d5f6275937155e0aa2647374 Mon Sep 17 00:00:00 2001 From: Victor Marmol Date: Tue, 29 Apr 2014 22:49:03 +0000 Subject: [PATCH 108/219] Add new test-unit make rule which only runs the unit tests. Renames test bundle to test-unit. Docker-DCO-1.1-Signed-off-by: Victor Marmol (github: vmarmol) --- Makefile | 7 +++++-- hack/make.sh | 2 +- hack/make/{test => test-unit} | 6 +++--- hack/release.sh | 2 +- 4 files changed, 10 insertions(+), 7 deletions(-) rename hack/make/{test => test-unit} (92%) diff --git a/Makefile b/Makefile index 5945dc2737..a4c8658e08 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration test-integration-cli validate +.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli validate # to allow `make BINDDIR=. shell` or `make BINDDIR= test` BINDDIR := bundles @@ -35,7 +35,10 @@ docs-release: docs-build $(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./release.sh test: build - $(DOCKER_RUN_DOCKER) hack/make.sh binary test test-integration test-integration-cli + $(DOCKER_RUN_DOCKER) hack/make.sh binary test-unit test-integration test-integration-cli + +test-unit: build + $(DOCKER_RUN_DOCKER) hack/make.sh test-unit test-integration: build $(DOCKER_RUN_DOCKER) hack/make.sh test-integration diff --git a/hack/make.sh b/hack/make.sh index 46df398c57..8636756c87 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -45,7 +45,7 @@ DEFAULT_BUNDLES=( binary - test + test-unit test-integration test-integration-cli diff --git a/hack/make/test b/hack/make/test-unit similarity index 92% rename from hack/make/test rename to hack/make/test-unit index 828fdf6efd..066865859c 100644 --- a/hack/make/test +++ b/hack/make/test-unit @@ -11,9 +11,9 @@ TEXTRESET=$'\033[0m' # reset the foreground colour # If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. # You can use this to select certain tests to run, eg. 
# -# TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test +# TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test-unit # -bundle_test() { +bundle_test_unit() { { date @@ -52,4 +52,4 @@ bundle_test() { } 2>&1 | tee $DEST/test.log } -bundle_test +bundle_test_unit diff --git a/hack/release.sh b/hack/release.sh index d77d454e27..8642a4edb9 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -54,7 +54,7 @@ RELEASE_BUNDLES=( if [ "$1" != '--release-regardless-of-test-failure' ]; then RELEASE_BUNDLES=( - test test-integration + test-unit test-integration "${RELEASE_BUNDLES[@]}" test-integration-cli ) From e802b69146ac7a008d943a3a289fba56150b4f81 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Tue, 25 Mar 2014 13:19:41 +0100 Subject: [PATCH 109/219] beam: Add more tests to unix_test.go These are failing, and indicate things that need to be fixed. The primarily problem is the lack of framing between beam messages. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) [solomon@docker.com: rebased on master] Signed-off-by: Solomon Hykes --- pkg/beam/unix_test.go | 151 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 151 insertions(+) diff --git a/pkg/beam/unix_test.go b/pkg/beam/unix_test.go index 09815aa0d6..976f089c23 100644 --- a/pkg/beam/unix_test.go +++ b/pkg/beam/unix_test.go @@ -25,6 +25,30 @@ func TestSocketPair(t *testing.T) { fmt.Printf("still open: %v\n", a.Fd()) } +func TestUSocketPair(t *testing.T) { + a, b, err := USocketPair() + if err != nil { + t.Fatal(err) + } + + data := "hello world!" + go func() { + a.Write([]byte(data)) + a.Close() + }() + res := make([]byte, 1024) + size, err := b.Read(res) + if err != nil { + t.Fatal(err) + } + if size != len(data) { + t.Fatal("Unexpected size") + } + if string(res[0:size]) != data { + t.Fatal("Unexpected data") + } +} + func TestSendUnixSocket(t *testing.T) { a1, a2, err := USocketPair() if err != nil { @@ -83,4 +107,131 @@ func TestSendUnixSocket(t *testing.T) { t.Fatal(err) } fmt.Printf("---> %s\n", data) + +} + +// Ensure we get proper segmenting of messages +func TestSendSegmenting(t *testing.T) { + a, b, err := USocketPair() + if err != nil { + t.Fatal(err) + } + defer a.Close() + defer b.Close() + + extrafd1, extrafd2, err := SocketPair() + if err != nil { + t.Fatal(err) + } + extrafd2.Close() + + go func() { + a.Send([]byte("message 1"), nil) + a.Send([]byte("message 2"), extrafd1) + a.Send([]byte("message 3"), nil) + }() + + msg1, file1, err := b.Receive() + if err != nil { + t.Fatal(err) + } + if string(msg1) != "message 1" { + t.Fatal("unexpected msg1:", string(msg1)) + } + if file1 != nil { + t.Fatal("unexpectedly got file1") + } + + msg2, file2, err := b.Receive() + if err != nil { + t.Fatal(err) + } + if string(msg2) != "message 2" { + t.Fatal("unexpected msg2:", string(msg2)) + } + if file2 == nil { + t.Fatal("didn't get file2") + } + file2.Close() + + msg3, file3, err := b.Receive() + if err != nil { + t.Fatal(err) + } + if string(msg3) != "message 3" { + t.Fatal("unexpected msg3:", string(msg3)) + } + if file3 != nil { + t.Fatal("unexpectedly got file3") + } + +} + +// Test sending a zero byte message +func TestSendEmpty(t *testing.T) { + a, b, err := USocketPair() + if err != nil { + t.Fatal(err) + } + defer a.Close() + defer b.Close() + go func() { + a.Send([]byte{}, nil) + }() + + msg, file, err := b.Receive() + if err != nil { + t.Fatal(err) + } + if len(msg) != 0 { + t.Fatalf("unexpected non-empty message: %v", msg) + } + if file != nil { + t.Fatal("unexpectedly got file") + } + +} + +func 
makeLarge(size int) []byte { + res := make([]byte, size) + for i := range res { + res[i] = byte(i % 255) + } + return res +} + +func verifyLarge(data []byte, size int) bool { + if len(data) != size { + return false + } + for i := range data { + if data[i] != byte(i%255) { + return false + } + } + return true +} + +// Test sending a large message +func TestSendLarge(t *testing.T) { + a, b, err := USocketPair() + if err != nil { + t.Fatal(err) + } + defer a.Close() + defer b.Close() + go func() { + a.Send(makeLarge(100000), nil) + }() + + msg, file, err := b.Receive() + if err != nil { + t.Fatal(err) + } + if !verifyLarge(msg, 100000) { + t.Fatalf("unexpected message (size %d)", len(msg)) + } + if file != nil { + t.Fatal("unexpectedly got file") + } } From 4fac4d2149856f0740a98e87ef0e0e6bd04d1b53 Mon Sep 17 00:00:00 2001 From: unclejack Date: Wed, 30 Apr 2014 14:13:39 +0300 Subject: [PATCH 110/219] check if the daemon is run as root on startup This commit makes Docker throw an error if the daemon isn't started as root. Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- docker/docker.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docker/docker.go b/docker/docker.go index 4d90ab8b2e..f0001b8f3d 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -97,6 +97,10 @@ func main() { } if *flDaemon { + if os.Geteuid() != 0 { + log.Fatalf("The Docker daemon needs to be run as root") + } + if flag.NArg() != 0 { flag.Usage() return From 8caef610b2548273d48ace696d3a90ba0b510d66 Mon Sep 17 00:00:00 2001 From: Bryan Matsuo Date: Fri, 25 Apr 2014 03:23:05 -0600 Subject: [PATCH 111/219] update AUTHORS Docker-DCO-1.1-Signed-off-by: Bryan Matsuo (github: bmatsuo) --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index 6e34065266..8fb76c057e 100644 --- a/AUTHORS +++ b/AUTHORS @@ -44,6 +44,7 @@ Brian Olsen Brian Shumate Briehan Lombaard Bruno Bigras +Bryan Matsuo Caleb Spare Calen Pennington Carl X. Su From defecac2799ca0c72532b7e6ed6005cc54ee2e25 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Wed, 30 Apr 2014 11:22:11 -0600 Subject: [PATCH 112/219] Fix various MAINTAINERS format inconsistencies Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- contrib/zfs/MAINTAINERS | 2 +- engine/MAINTAINERS | 2 +- integration/MAINTAINERS | 2 +- pkg/beam/MAINTAINERS | 2 +- pkg/dockerscript/MAINTAINERS | 2 +- pkg/signal/MAINTAINERS | 1 - server/MAINTAINERS | 2 +- 7 files changed, 6 insertions(+), 7 deletions(-) diff --git a/contrib/zfs/MAINTAINERS b/contrib/zfs/MAINTAINERS index 90bc6e3d60..05fb572459 100644 --- a/contrib/zfs/MAINTAINERS +++ b/contrib/zfs/MAINTAINERS @@ -1 +1 @@ -Gurjeet Singh (gurjeet.singh.im) +Gurjeet Singh (@gurjeet) diff --git a/engine/MAINTAINERS b/engine/MAINTAINERS index db33365bcd..aee10c8421 100644 --- a/engine/MAINTAINERS +++ b/engine/MAINTAINERS @@ -1 +1 @@ -Solomon Hykes +Solomon Hykes (@shykes) diff --git a/integration/MAINTAINERS b/integration/MAINTAINERS index 2d47d7a711..d7bef621cf 100644 --- a/integration/MAINTAINERS +++ b/integration/MAINTAINERS @@ -1,4 +1,4 @@ -Solomon Hykes +Solomon Hykes (@shykes) # WE ARE LOOKING FOR VOLUNTEERS TO HELP CLEAN THIS UP. # TO VOLUNTEER PLEASE OPEN A PULL REQUEST ADDING YOURSELF TO THIS FILE. # WE WILL HELP YOU GET STARTED. THANKS! 
diff --git a/pkg/beam/MAINTAINERS b/pkg/beam/MAINTAINERS index db33365bcd..aee10c8421 100644 --- a/pkg/beam/MAINTAINERS +++ b/pkg/beam/MAINTAINERS @@ -1 +1 @@ -Solomon Hykes +Solomon Hykes (@shykes) diff --git a/pkg/dockerscript/MAINTAINERS b/pkg/dockerscript/MAINTAINERS index db33365bcd..aee10c8421 100644 --- a/pkg/dockerscript/MAINTAINERS +++ b/pkg/dockerscript/MAINTAINERS @@ -1 +1 @@ -Solomon Hykes +Solomon Hykes (@shykes) diff --git a/pkg/signal/MAINTAINERS b/pkg/signal/MAINTAINERS index 3300331598..acf6f21b63 100644 --- a/pkg/signal/MAINTAINERS +++ b/pkg/signal/MAINTAINERS @@ -1,2 +1 @@ Guillaume J. Charmes (@creack) - diff --git a/server/MAINTAINERS b/server/MAINTAINERS index db33365bcd..aee10c8421 100644 --- a/server/MAINTAINERS +++ b/server/MAINTAINERS @@ -1 +1 @@ -Solomon Hykes +Solomon Hykes (@shykes) From 5f00372af2e0aa0ec540830ba575fe83ae16017f Mon Sep 17 00:00:00 2001 From: Johannes 'fish' Ziemke Date: Wed, 30 Apr 2014 19:23:51 +0200 Subject: [PATCH 113/219] Add notes about git commit messages This improves readability of commits a lot and is easy to follow. I think most people follow those rules already. They are based on http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html Docker-DCO-1.1-Signed-off-by: Johannes 'fish' Ziemke (github: discordianfish) --- CONTRIBUTING.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0e8b98122f..5a1ad4b0ab 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -90,6 +90,10 @@ reference to all the issues that they address. Pull requests must not contain commits from other users or branches. +Commit messages must start with a capitalized and short summary (max. 50 +chars) written in the imperative, followed by an optional, more detailed +explanatory text which is separated from the summary by an empty line. + Code review comments may be added to your pull request. Discuss, then make the suggested modifications and push additional commits to your feature branch. Be sure to post a comment after pushing. The new commits will show up in the pull From 0037dc8d60bc22e34838e15efaa1041ea2826c2c Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Wed, 30 Apr 2014 11:56:14 -0600 Subject: [PATCH 114/219] Remove contrib/zfs directory A WIP pull request or issue for discussion would be a better avenue for collaboration and discussion of a ZFS backend. Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- contrib/zfs/MAINTAINERS | 1 - contrib/zfs/README.md | 23 ----------------------- 2 files changed, 24 deletions(-) delete mode 100644 contrib/zfs/MAINTAINERS delete mode 100644 contrib/zfs/README.md diff --git a/contrib/zfs/MAINTAINERS b/contrib/zfs/MAINTAINERS deleted file mode 100644 index 05fb572459..0000000000 --- a/contrib/zfs/MAINTAINERS +++ /dev/null @@ -1 +0,0 @@ -Gurjeet Singh (@gurjeet) diff --git a/contrib/zfs/README.md b/contrib/zfs/README.md deleted file mode 100644 index 84f6296e10..0000000000 --- a/contrib/zfs/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# ZFS Storage Driver - -This is a placeholder to declare the presence and status of ZFS storage driver -for containers. - -The current development is done in Gurjeet Singh's fork of Docker, under the -branch named [zfs_driver]. - -[zfs_driver]: https://github.com/gurjeet/docker/tree/zfs_driver - - -# Status - -Alpha: The code is now capable of creating, running and destroying containers -and images. - -The code is under development. Contributions in the form of suggestions, -code-reviews, and patches are welcome. 
- -Please send the communication to gurjeet@singh.im and CC at least one Docker -mailing list. - - From 99284a24e76bf52582e5b70ad28b6371bd4f4b42 Mon Sep 17 00:00:00 2001 From: Bryan Matsuo Date: Fri, 25 Apr 2014 03:11:32 -0600 Subject: [PATCH 115/219] FIXES #5398: pkg/graphdb build only dependent on cgo tag Docker-DCO-1.1-Signed-off-by: Bryan Matsuo (github: bmatsuo) --- pkg/graphdb/conn_sqlite3.go | 2 +- pkg/graphdb/conn_unsupported.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/graphdb/conn_sqlite3.go b/pkg/graphdb/conn_sqlite3.go index 5b5f8e6bfc..33355ae4dc 100644 --- a/pkg/graphdb/conn_sqlite3.go +++ b/pkg/graphdb/conn_sqlite3.go @@ -1,4 +1,4 @@ -// +build linux,amd64 freebsd,cgo +// +build cgo package graphdb diff --git a/pkg/graphdb/conn_unsupported.go b/pkg/graphdb/conn_unsupported.go index 0a48634336..3895051661 100644 --- a/pkg/graphdb/conn_unsupported.go +++ b/pkg/graphdb/conn_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!freebsd linux,!amd64 freebsd,!cgo +// +build !cgo package graphdb From 6203d8b462ee9dbc42b651ac8b6b2d83b9eef963 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 30 Apr 2014 12:01:06 -0700 Subject: [PATCH 116/219] Add system maintainers Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/system/MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 pkg/system/MAINTAINERS diff --git a/pkg/system/MAINTAINERS b/pkg/system/MAINTAINERS new file mode 100644 index 0000000000..1cb551364d --- /dev/null +++ b/pkg/system/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Guillaume J. Charmes (@creack) From 1fd919bbf41e68cd38a4794e1f8e3d585650a231 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Wed, 30 Apr 2014 12:20:52 -0700 Subject: [PATCH 117/219] Make Jerome Petazzoni a maintainer for dind In the spirit of getting more contributors to maintain their components.. I nominate @jpetazzo ot maintain dind (he's the original author). 
@jpetazzo I don't expect this to be too much load, but it's a good and symbolic start :) Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- hack/MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/hack/MAINTAINERS b/hack/MAINTAINERS index 18e05a3070..299d9a14af 100644 --- a/hack/MAINTAINERS +++ b/hack/MAINTAINERS @@ -1 +1,2 @@ Tianon Gravi (@tianon) +dind: Jerome Petazzoni (@jpetazzo) From 986c647d5ac9bda0633cdeb927bb782d8df02269 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Mon, 28 Apr 2014 17:04:56 -0700 Subject: [PATCH 118/219] Fix bridge ip comparison Docker-DCO-1.1-Signed-off-by: Tibor Vass (github: tiborvass) --- daemon/networkdriver/bridge/driver.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index 90782a5824..4828e3b9cf 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -97,8 +97,12 @@ func InitDriver(job *engine.Job) engine.Status { network = addr.(*net.IPNet) // validate that the bridge ip matches the ip specified by BridgeIP if bridgeIP != "" { - if !network.IP.Equal(net.ParseIP(bridgeIP)) { - return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", network.IP, bridgeIP) + bip, _, err := net.ParseCIDR(bridgeIP) + if err != nil { + return job.Error(err) + } + if !network.IP.Equal(bip) { + return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", network.IP, bip) } } } From 162dafbcd5c4d57c7f436e11d90423ee6d7c3ce1 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 30 Apr 2014 15:24:18 -0700 Subject: [PATCH 119/219] Remove logger from nsinit struct Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/execdriver/native/driver.go | 17 ++--------- pkg/libcontainer/nsinit/exec.go | 14 +-------- pkg/libcontainer/nsinit/execin.go | 11 +++---- pkg/libcontainer/nsinit/init.go | 6 ---- pkg/libcontainer/nsinit/nsinit.go | 9 ++---- pkg/libcontainer/nsinit/nsinit/main.go | 42 ++++++-------------------- 6 files changed, 20 insertions(+), 79 deletions(-) diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go index 8b374d9938..812389c1bd 100644 --- a/daemon/execdriver/native/driver.go +++ b/daemon/execdriver/native/driver.go @@ -3,9 +3,7 @@ package native import ( "encoding/json" "fmt" - "io" "io/ioutil" - "log" "os" "os/exec" "path/filepath" @@ -31,7 +29,7 @@ func init() { execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { var ( container *libcontainer.Container - ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root}, createLogger("")) + ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root}) ) f, err := os.Open(filepath.Join(args.Root, "container.json")) if err != nil { @@ -102,7 +100,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba c: c, dsw: &nsinit.DefaultStateWriter{filepath.Join(d.root, c.ID)}, } - ns = nsinit.NewNsInit(factory, stateWriter, createLogger(os.Getenv("DEBUG"))) + ns = nsinit.NewNsInit(factory, stateWriter) args = append([]string{c.Entrypoint}, c.Arguments...) 
) if err := d.createContainerRoot(c.ID); err != nil { @@ -287,14 +285,3 @@ func (d *dockerStateWriter) WritePid(pid int, started string) error { func (d *dockerStateWriter) DeletePid() error { return d.dsw.DeletePid() } - -func createLogger(debug string) *log.Logger { - var w io.Writer - // if we are in debug mode set the logger to stderr - if debug != "" { - w = os.Stderr - } else { - w = ioutil.Discard - } - return log.New(w, "[libcontainer] ", log.LstdFlags) -} diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index 430dd89ff3..17201b6def 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -30,10 +30,8 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ if err != nil { return -1, err } - ns.logger.Printf("created sync pipe parent fd %d child fd %d\n", syncPipe.parent.Fd(), syncPipe.child.Fd()) if container.Tty { - ns.logger.Println("creating master and console") master, console, err = system.CreateMasterAndConsole() if err != nil { return -1, err @@ -42,13 +40,11 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ } command := ns.commandFactory.Create(container, console, syncPipe.child, args) - ns.logger.Println("attach terminal to command") if err := term.Attach(command); err != nil { return -1, err } defer term.Close() - ns.logger.Println("starting command") if err := command.Start(); err != nil { return -1, err } @@ -57,19 +53,14 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ if err != nil { return -1, err } - ns.logger.Printf("writing pid %d to file\n", command.Process.Pid) if err := ns.stateWriter.WritePid(command.Process.Pid, started); err != nil { command.Process.Kill() return -1, err } - defer func() { - ns.logger.Println("removing pid file") - ns.stateWriter.DeletePid() - }() + defer ns.stateWriter.DeletePid() // Do this before syncing with child so that no children // can escape the cgroup - ns.logger.Println("setting cgroups") activeCgroup, err := ns.SetupCgroups(container, command.Process.Pid) if err != nil { command.Process.Kill() @@ -79,13 +70,11 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ defer activeCgroup.Cleanup() } - ns.logger.Println("setting up network") if err := ns.InitializeNetworking(container, command.Process.Pid, syncPipe); err != nil { command.Process.Kill() return -1, err } - ns.logger.Println("closing sync pipe with child") // Sync with child syncPipe.Close() @@ -95,7 +84,6 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ } } status := command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() - ns.logger.Printf("process exited with status %d\n", status) return status, err } diff --git a/pkg/libcontainer/nsinit/execin.go b/pkg/libcontainer/nsinit/execin.go index b79881015f..c4ddb78ba0 100644 --- a/pkg/libcontainer/nsinit/execin.go +++ b/pkg/libcontainer/nsinit/execin.go @@ -4,14 +4,15 @@ package nsinit import ( "fmt" - "github.com/dotcloud/docker/pkg/label" - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/mount" - "github.com/dotcloud/docker/pkg/system" "os" "path/filepath" "strconv" "syscall" + + "github.com/dotcloud/docker/pkg/label" + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/mount" + "github.com/dotcloud/docker/pkg/system" ) // ExecIn uses an existing pid and joins the pid's namespaces with the new command. 
@@ -42,7 +43,6 @@ func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []s // foreach namespace fd, use setns to join an existing container's namespaces for _, fd := range fds { if fd > 0 { - ns.logger.Printf("setns on %d\n", fd) if err := system.Setns(fd, 0); err != nil { closeFds() return -1, fmt.Errorf("setns %s", err) @@ -54,7 +54,6 @@ func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []s // if the container has a new pid and mount namespace we need to // remount proc and sys to pick up the changes if container.Namespaces.Contains("NEWNS") && container.Namespaces.Contains("NEWPID") { - ns.logger.Println("forking to remount /proc and /sys") pid, err := system.Fork() if err != nil { return -1, err diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 4ca026d2e6..dd4fddeeb4 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -29,17 +29,14 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol } // We always read this as it is a way to sync with the parent as well - ns.logger.Printf("reading from sync pipe fd %d\n", syncPipe.child.Fd()) context, err := syncPipe.ReadFromParent() if err != nil { syncPipe.Close() return err } - ns.logger.Println("received context from parent") syncPipe.Close() if consolePath != "" { - ns.logger.Printf("setting up %s as console\n", consolePath) if err := console.OpenAndDup(consolePath); err != nil { return err } @@ -57,7 +54,6 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol } label.Init() - ns.logger.Println("setup mount namespace") if err := mount.InitializeMountNamespace(rootfs, consolePath, container); err != nil { return fmt.Errorf("setup mount namespace %s", err) } @@ -69,7 +65,6 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol } if profile := container.Context["apparmor_profile"]; profile != "" { - ns.logger.Printf("setting apparmor profile %s\n", profile) if err := apparmor.ApplyProfile(os.Getpid(), profile); err != nil { return err } @@ -79,7 +74,6 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol if err := label.SetProcessLabel(container.Context["process_label"]); err != nil { return fmt.Errorf("set process label %s", err) } - ns.logger.Printf("execing %s\n", args[0]) return system.Execv(args[0], args[0:], container.Env) } diff --git a/pkg/libcontainer/nsinit/nsinit.go b/pkg/libcontainer/nsinit/nsinit.go index c308692af6..6aed9c9dbc 100644 --- a/pkg/libcontainer/nsinit/nsinit.go +++ b/pkg/libcontainer/nsinit/nsinit.go @@ -1,9 +1,6 @@ package nsinit -import ( - "github.com/dotcloud/docker/pkg/libcontainer" - "log" -) +import "github.com/dotcloud/docker/pkg/libcontainer" // NsInit is an interface with the public facing methods to provide high level // exec operations on a container @@ -17,13 +14,11 @@ type linuxNs struct { root string commandFactory CommandFactory stateWriter StateWriter - logger *log.Logger } -func NewNsInit(command CommandFactory, state StateWriter, logger *log.Logger) NsInit { +func NewNsInit(command CommandFactory, state StateWriter) NsInit { return &linuxNs{ commandFactory: command, stateWriter: state, - logger: logger, } } diff --git a/pkg/libcontainer/nsinit/nsinit/main.go b/pkg/libcontainer/nsinit/nsinit/main.go index 0965c1c8ca..615e31d6db 100644 --- a/pkg/libcontainer/nsinit/nsinit/main.go +++ b/pkg/libcontainer/nsinit/nsinit/main.go @@ -3,7 +3,6 @@ package main import ( 
"encoding/json" "flag" - "io" "io/ioutil" "log" "os" @@ -38,12 +37,8 @@ func main() { if err != nil { log.Fatalf("Unable to load container: %s", err) } - l, err := getLogger("[exec] ") - if err != nil { - log.Fatal(err) - } - ns, err := newNsInit(l) + ns, err := newNsInit() if err != nil { log.Fatalf("Unable to initialize nsinit: %s", err) } @@ -54,7 +49,7 @@ func main() { nspid, err := readPid() if err != nil { if !os.IsNotExist(err) { - l.Fatalf("Unable to read pid: %s", err) + log.Fatalf("Unable to read pid: %s", err) } } if nspid > 0 { @@ -64,26 +59,26 @@ func main() { exitCode, err = ns.Exec(container, term, flag.Args()[1:]) } if err != nil { - l.Fatalf("Failed to exec: %s", err) + log.Fatalf("Failed to exec: %s", err) } os.Exit(exitCode) case "init": // this is executed inside of the namespace to setup the container cwd, err := os.Getwd() if err != nil { - l.Fatal(err) + log.Fatal(err) } if flag.NArg() < 2 { - l.Fatalf("wrong number of arguments %d", flag.NArg()) + log.Fatalf("wrong number of arguments %d", flag.NArg()) } syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(pipeFd)) if err != nil { - l.Fatalf("Unable to create sync pipe: %s", err) + log.Fatalf("Unable to create sync pipe: %s", err) } if err := ns.Init(container, cwd, console, syncPipe, flag.Args()[1:]); err != nil { - l.Fatalf("Unable to initialize for container: %s", err) + log.Fatalf("Unable to initialize for container: %s", err) } default: - l.Fatalf("command not supported for nsinit %s", flag.Arg(0)) + log.Fatalf("command not supported for nsinit %s", flag.Arg(0)) } } @@ -113,23 +108,6 @@ func readPid() (int, error) { return pid, nil } -func newNsInit(l *log.Logger) (nsinit.NsInit, error) { - return nsinit.NewNsInit(&nsinit.DefaultCommandFactory{root}, &nsinit.DefaultStateWriter{root}, l), nil -} - -func getLogger(prefix string) (*log.Logger, error) { - var w io.Writer - switch logs { - case "", "none": - w = ioutil.Discard - case "stderr": - w = os.Stderr - default: // we have a filepath - f, err := os.OpenFile(logs, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755) - if err != nil { - return nil, err - } - w = f - } - return log.New(w, prefix, log.LstdFlags), nil +func newNsInit() (nsinit.NsInit, error) { + return nsinit.NewNsInit(&nsinit.DefaultCommandFactory{root}, &nsinit.DefaultStateWriter{root}), nil } From cd8cec854be33a74179618864cd528acf5129cd9 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 30 Apr 2014 15:27:59 -0700 Subject: [PATCH 120/219] Export SetupUser Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/init.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index dd4fddeeb4..52708f4300 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -77,10 +77,11 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol return system.Execv(args[0], args[0:], container.Env) } -func setupUser(container *libcontainer.Container) error { - uid, gid, suppGids, err := user.GetUserGroupSupplementary(container.User, syscall.Getuid(), syscall.Getgid()) +// SetupUser changes the groups, gid, and uid for the user inside the container +func SetupUser(u string) error { + uid, gid, suppGids, err := user.GetUserGroupSupplementary(u, syscall.Getuid(), syscall.Getgid()) if err != nil { - return fmt.Errorf("GetUserGroupSupplementary %s", err) + return fmt.Errorf("get supplementary groups %s", err) } if err := 
system.Setgroups(suppGids); err != nil { return fmt.Errorf("setgroups %s", err) @@ -122,7 +123,7 @@ func finalizeNamespace(container *libcontainer.Container) error { if err := system.CloseFdsFrom(3); err != nil { return fmt.Errorf("close open file descriptors %s", err) } - if err := setupUser(container); err != nil { + if err := SetupUser(container.User); err != nil { return fmt.Errorf("setup user %s", err) } if container.WorkingDir != "" { From f1104014372e71e1f8ae7a63d17e18de5e2fa93a Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 30 Apr 2014 15:52:40 -0700 Subject: [PATCH 121/219] Remove statewriter interface, export more libcontainer funcs This temp. expands the Exec method's signature but adds a more robust way to know when the container's process is actually released and begins to run. The network interfaces are not guaranteed to be up yet but this provides a more accurate view with a single callback at this time. Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/execdriver/native/driver.go | 41 ++++++--------------- pkg/libcontainer/nsinit/exec.go | 49 +++++++++++++++++++++----- pkg/libcontainer/nsinit/nsinit.go | 6 ++-- pkg/libcontainer/nsinit/nsinit/main.go | 4 +-- pkg/libcontainer/nsinit/state.go | 36 ------------------- pkg/libcontainer/nsinit/unsupported.go | 2 +- 6 files changed, 56 insertions(+), 82 deletions(-) delete mode 100644 pkg/libcontainer/nsinit/state.go diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go index 812389c1bd..26c7d90474 100644 --- a/daemon/execdriver/native/driver.go +++ b/daemon/execdriver/native/driver.go @@ -29,7 +29,7 @@ func init() { execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { var ( container *libcontainer.Container - ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root}) + ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}) ) f, err := os.Open(filepath.Join(args.Root, "container.json")) if err != nil { @@ -93,15 +93,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba d.activeContainers[c.ID] = &c.Cmd var ( - term nsinit.Terminal - factory = &dockerCommandFactory{c: c, driver: d} - stateWriter = &dockerStateWriter{ - callback: startCallback, - c: c, - dsw: &nsinit.DefaultStateWriter{filepath.Join(d.root, c.ID)}, - } - ns = nsinit.NewNsInit(factory, stateWriter) - args = append([]string{c.Entrypoint}, c.Arguments...) + term nsinit.Terminal + factory = &dockerCommandFactory{c: c, driver: d} + pidRoot = filepath.Join(d.root, c.ID) + ns = nsinit.NewNsInit(factory) + args = append([]string{c.Entrypoint}, c.Arguments...) 
) if err := d.createContainerRoot(c.ID); err != nil { return -1, err @@ -121,7 +117,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba if err := d.writeContainerFile(container, c.ID); err != nil { return -1, err } - return ns.Exec(container, term, args) + return ns.Exec(container, term, pidRoot, args, func() { + if startCallback != nil { + startCallback(c) + } + }) } func (d *driver) Kill(p *execdriver.Command, sig int) error { @@ -266,22 +266,3 @@ func (d *dockerCommandFactory) Create(container *libcontainer.Container, console return &d.c.Cmd } - -type dockerStateWriter struct { - dsw nsinit.StateWriter - c *execdriver.Command - callback execdriver.StartCallback -} - -func (d *dockerStateWriter) WritePid(pid int, started string) error { - d.c.ContainerPid = pid - err := d.dsw.WritePid(pid, started) - if d.callback != nil { - d.callback(d.c) - } - return err -} - -func (d *dockerStateWriter) DeletePid() error { - return d.dsw.DeletePid() -} diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index 17201b6def..64d35e51c3 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -3,8 +3,11 @@ package nsinit import ( + "fmt" + "io/ioutil" "os" "os/exec" + "path/filepath" "syscall" "github.com/dotcloud/docker/pkg/cgroups" @@ -17,7 +20,7 @@ import ( // Exec performes setup outside of a namespace so that a container can be // executed. Exec is a high level function for working with container namespaces. -func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args []string) (int, error) { +func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, pidRoot string, args []string, startCallback func()) (int, error) { var ( master *os.File console string @@ -53,24 +56,24 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ if err != nil { return -1, err } - if err := ns.stateWriter.WritePid(command.Process.Pid, started); err != nil { + if err := WritePid(pidRoot, command.Process.Pid, started); err != nil { command.Process.Kill() return -1, err } - defer ns.stateWriter.DeletePid() + defer DeletePid(pidRoot) // Do this before syncing with child so that no children // can escape the cgroup - activeCgroup, err := ns.SetupCgroups(container, command.Process.Pid) + cleaner, err := SetupCgroups(container, command.Process.Pid) if err != nil { command.Process.Kill() return -1, err } - if activeCgroup != nil { - defer activeCgroup.Cleanup() + if cleaner != nil { + defer cleaner.Cleanup() } - if err := ns.InitializeNetworking(container, command.Process.Pid, syncPipe); err != nil { + if err := InitializeNetworking(container, command.Process.Pid, syncPipe); err != nil { command.Process.Kill() return -1, err } @@ -78,6 +81,10 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ // Sync with child syncPipe.Close() + if startCallback != nil { + startCallback() + } + if err := command.Wait(); err != nil { if _, ok := err.(*exec.ExitError); !ok { return -1, err @@ -87,7 +94,9 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ return status, err } -func (ns *linuxNs) SetupCgroups(container *libcontainer.Container, nspid int) (cgroups.ActiveCgroup, error) { +// SetupCgroups applies the cgroup restrictions to the process running in the contaienr based +// on the container's configuration +func SetupCgroups(container *libcontainer.Container, nspid int) (cgroups.ActiveCgroup, error) { if container.Cgroups != 
nil { c := container.Cgroups if systemd.UseSystemd() { @@ -98,7 +107,9 @@ func (ns *linuxNs) SetupCgroups(container *libcontainer.Container, nspid int) (c return nil, nil } -func (ns *linuxNs) InitializeNetworking(container *libcontainer.Container, nspid int, pipe *SyncPipe) error { +// InitializeNetworking creates the container's network stack outside of the namespace and moves +// interfaces into the container's net namespaces if necessary +func InitializeNetworking(container *libcontainer.Container, nspid int, pipe *SyncPipe) error { context := libcontainer.Context{} for _, config := range container.Networks { strategy, err := network.GetStrategy(config.Type) @@ -111,3 +122,23 @@ func (ns *linuxNs) InitializeNetworking(container *libcontainer.Container, nspid } return pipe.SendToChild(context) } + +// WritePid writes the namespaced processes pid to pid and it's start time +// to the path specified +func WritePid(path string, pid int, startTime string) error { + err := ioutil.WriteFile(filepath.Join(path, "pid"), []byte(fmt.Sprint(pid)), 0655) + if err != nil { + return err + } + return ioutil.WriteFile(filepath.Join(path, "start"), []byte(startTime), 0655) +} + +// DeletePid removes the pid and started file from disk when the container's process +// dies and the container is cleanly removed +func DeletePid(path string) error { + err := os.Remove(filepath.Join(path, "pid")) + if serr := os.Remove(filepath.Join(path, "start")); err == nil { + err = serr + } + return err +} diff --git a/pkg/libcontainer/nsinit/nsinit.go b/pkg/libcontainer/nsinit/nsinit.go index 6aed9c9dbc..506a39eaed 100644 --- a/pkg/libcontainer/nsinit/nsinit.go +++ b/pkg/libcontainer/nsinit/nsinit.go @@ -5,7 +5,7 @@ import "github.com/dotcloud/docker/pkg/libcontainer" // NsInit is an interface with the public facing methods to provide high level // exec operations on a container type NsInit interface { - Exec(container *libcontainer.Container, term Terminal, args []string) (int, error) + Exec(container *libcontainer.Container, term Terminal, pidRoot string, args []string, startCallback func()) (int, error) ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error } @@ -13,12 +13,10 @@ type NsInit interface { type linuxNs struct { root string commandFactory CommandFactory - stateWriter StateWriter } -func NewNsInit(command CommandFactory, state StateWriter) NsInit { +func NewNsInit(command CommandFactory) NsInit { return &linuxNs{ commandFactory: command, - stateWriter: state, } } diff --git a/pkg/libcontainer/nsinit/nsinit/main.go b/pkg/libcontainer/nsinit/nsinit/main.go index 615e31d6db..bcb0068ba9 100644 --- a/pkg/libcontainer/nsinit/nsinit/main.go +++ b/pkg/libcontainer/nsinit/nsinit/main.go @@ -56,7 +56,7 @@ func main() { exitCode, err = ns.ExecIn(container, nspid, flag.Args()[1:]) } else { term := nsinit.NewTerminal(os.Stdin, os.Stdout, os.Stderr, container.Tty) - exitCode, err = ns.Exec(container, term, flag.Args()[1:]) + exitCode, err = ns.Exec(container, term, root, flag.Args()[1:], nil) } if err != nil { log.Fatalf("Failed to exec: %s", err) @@ -109,5 +109,5 @@ func readPid() (int, error) { } func newNsInit() (nsinit.NsInit, error) { - return nsinit.NewNsInit(&nsinit.DefaultCommandFactory{root}, &nsinit.DefaultStateWriter{root}), nil + return nsinit.NewNsInit(&nsinit.DefaultCommandFactory{root}), nil } diff --git a/pkg/libcontainer/nsinit/state.go 
b/pkg/libcontainer/nsinit/state.go deleted file mode 100644 index 26d7fa4230..0000000000 --- a/pkg/libcontainer/nsinit/state.go +++ /dev/null @@ -1,36 +0,0 @@ -package nsinit - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" -) - -// StateWriter handles writing and deleting the pid file -// on disk -type StateWriter interface { - WritePid(pid int, startTime string) error - DeletePid() error -} - -type DefaultStateWriter struct { - Root string -} - -// writePidFile writes the namespaced processes pid to pid in the rootfs for the container -func (d *DefaultStateWriter) WritePid(pid int, startTime string) error { - err := ioutil.WriteFile(filepath.Join(d.Root, "pid"), []byte(fmt.Sprint(pid)), 0655) - if err != nil { - return err - } - return ioutil.WriteFile(filepath.Join(d.Root, "start"), []byte(startTime), 0655) -} - -func (d *DefaultStateWriter) DeletePid() error { - err := os.Remove(filepath.Join(d.Root, "pid")) - if serr := os.Remove(filepath.Join(d.Root, "start")); err == nil { - err = serr - } - return err -} diff --git a/pkg/libcontainer/nsinit/unsupported.go b/pkg/libcontainer/nsinit/unsupported.go index 2412223d28..135c0ef314 100644 --- a/pkg/libcontainer/nsinit/unsupported.go +++ b/pkg/libcontainer/nsinit/unsupported.go @@ -6,7 +6,7 @@ import ( "github.com/dotcloud/docker/pkg/libcontainer" ) -func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args []string) (int, error) { +func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, pidRoot string, args []string, startCallback func()) (int, error) { return -1, libcontainer.ErrUnsupported } From 5f6fda8cfd05dec002894d3e2214a04a58b62bed Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 30 Apr 2014 16:07:12 -0700 Subject: [PATCH 122/219] Add ability to set cgroups freezer Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/cgroups/cgroups.go | 1 + pkg/cgroups/fs/freezer.go | 18 ++++++++++++++---- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go index 81e3eb551a..86623845ae 100644 --- a/pkg/cgroups/cgroups.go +++ b/pkg/cgroups/cgroups.go @@ -20,6 +20,7 @@ type Cgroup struct { CpuQuota int64 `json:"cpu_quota,omitempty"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. CpuPeriod int64 `json:"cpu_period,omitempty"` // CPU period to be used for hardcapping (in usecs). 0 to use system default. 
CpusetCpus string `json:"cpuset_cpus,omitempty"` // CPU to use + Freezer string `json:"freezer,omitempty"` // set the freeze value for the process UnitProperties [][2]string `json:"unit_properties,omitempty"` // systemd unit properties } diff --git a/pkg/cgroups/fs/freezer.go b/pkg/cgroups/fs/freezer.go index ebf5bb9672..70cfcdde72 100644 --- a/pkg/cgroups/fs/freezer.go +++ b/pkg/cgroups/fs/freezer.go @@ -2,21 +2,31 @@ package fs import ( "fmt" - "github.com/dotcloud/docker/pkg/cgroups" "io/ioutil" "os" "path/filepath" "strconv" "strings" + + "github.com/dotcloud/docker/pkg/cgroups" ) type freezerGroup struct { } func (s *freezerGroup) Set(d *data) error { - // we just want to join this group even though we don't set anything - if _, err := d.join("freezer"); err != nil && err != cgroups.ErrNotFound { - return err + dir, err := d.join("freezer") + if err != nil { + if err != cgroups.ErrNotFound { + return err + } + return nil + } + + if d.c.Freezer != "" { + if err := writeFile(dir, "freezer.state", d.c.Freezer); err != nil { + return err + } } return nil } From a3e96abb5aacaa763f3f9205316dd0aef1977f16 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 30 Apr 2014 17:02:45 -0700 Subject: [PATCH 123/219] Export syncpipe fields Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/sync_pipe.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pkg/libcontainer/nsinit/sync_pipe.go b/pkg/libcontainer/nsinit/sync_pipe.go index f724f525f0..d0bfdda865 100644 --- a/pkg/libcontainer/nsinit/sync_pipe.go +++ b/pkg/libcontainer/nsinit/sync_pipe.go @@ -3,9 +3,10 @@ package nsinit import ( "encoding/json" "fmt" - "github.com/dotcloud/docker/pkg/libcontainer" "io/ioutil" "os" + + "github.com/dotcloud/docker/pkg/libcontainer" ) // SyncPipe allows communication to and from the child processes @@ -36,6 +37,14 @@ func NewSyncPipeFromFd(parendFd, childFd uintptr) (*SyncPipe, error) { return s, nil } +func (s *SyncPipe) Child() *os.File { + return s.child +} + +func (s *SyncPipe) Parent() *os.File { + return s.parent +} + func (s *SyncPipe) SendToChild(context libcontainer.Context) error { data, err := json.Marshal(context) if err != nil { From aecb9c39ab0eb5f09ebab40001fe0ff639ef617b Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 30 Apr 2014 17:04:09 -0700 Subject: [PATCH 124/219] Split term files to make it easier to manage Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/std_term.go | 49 ++++++++++++++++ pkg/libcontainer/nsinit/term.go | 89 ----------------------------- pkg/libcontainer/nsinit/tty_term.go | 55 ++++++++++++++++++ 3 files changed, 104 insertions(+), 89 deletions(-) create mode 100644 pkg/libcontainer/nsinit/std_term.go create mode 100644 pkg/libcontainer/nsinit/tty_term.go diff --git a/pkg/libcontainer/nsinit/std_term.go b/pkg/libcontainer/nsinit/std_term.go new file mode 100644 index 0000000000..2b8201a71b --- /dev/null +++ b/pkg/libcontainer/nsinit/std_term.go @@ -0,0 +1,49 @@ +package nsinit + +import ( + "io" + "os" + "os/exec" +) + +type StdTerminal struct { + stdin io.Reader + stdout, stderr io.Writer +} + +func (s *StdTerminal) SetMaster(*os.File) { + // no need to set master on non tty +} + +func (s *StdTerminal) Close() error { + return nil +} + +func (s *StdTerminal) Resize(h, w int) error { + return nil +} + +func (s *StdTerminal) Attach(command *exec.Cmd) error { + inPipe, err := command.StdinPipe() + if err != nil { + return err + } + outPipe, err 
:= command.StdoutPipe() + if err != nil { + return err + } + errPipe, err := command.StderrPipe() + if err != nil { + return err + } + + go func() { + defer inPipe.Close() + io.Copy(inPipe, s.stdin) + }() + + go io.Copy(s.stdout, outPipe) + go io.Copy(s.stderr, errPipe) + + return nil +} diff --git a/pkg/libcontainer/nsinit/term.go b/pkg/libcontainer/nsinit/term.go index 58dccab2b8..5fc801ab53 100644 --- a/pkg/libcontainer/nsinit/term.go +++ b/pkg/libcontainer/nsinit/term.go @@ -1,7 +1,6 @@ package nsinit import ( - "github.com/dotcloud/docker/pkg/term" "io" "os" "os/exec" @@ -28,91 +27,3 @@ func NewTerminal(stdin io.Reader, stdout, stderr io.Writer, tty bool) Terminal { stderr: stderr, } } - -type TtyTerminal struct { - stdin io.Reader - stdout, stderr io.Writer - master *os.File - state *term.State -} - -func (t *TtyTerminal) Resize(h, w int) error { - return term.SetWinsize(t.master.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) -} - -func (t *TtyTerminal) SetMaster(master *os.File) { - t.master = master -} - -func (t *TtyTerminal) Attach(command *exec.Cmd) error { - go io.Copy(t.stdout, t.master) - go io.Copy(t.master, t.stdin) - - state, err := t.setupWindow(t.master, os.Stdin) - if err != nil { - command.Process.Kill() - return err - } - t.state = state - return err -} - -// SetupWindow gets the parent window size and sets the master -// pty to the current size and set the parents mode to RAW -func (t *TtyTerminal) setupWindow(master, parent *os.File) (*term.State, error) { - ws, err := term.GetWinsize(parent.Fd()) - if err != nil { - return nil, err - } - if err := term.SetWinsize(master.Fd(), ws); err != nil { - return nil, err - } - return term.SetRawTerminal(parent.Fd()) -} - -func (t *TtyTerminal) Close() error { - term.RestoreTerminal(os.Stdin.Fd(), t.state) - return t.master.Close() -} - -type StdTerminal struct { - stdin io.Reader - stdout, stderr io.Writer -} - -func (s *StdTerminal) SetMaster(*os.File) { - // no need to set master on non tty -} - -func (s *StdTerminal) Close() error { - return nil -} - -func (s *StdTerminal) Resize(h, w int) error { - return nil -} - -func (s *StdTerminal) Attach(command *exec.Cmd) error { - inPipe, err := command.StdinPipe() - if err != nil { - return err - } - outPipe, err := command.StdoutPipe() - if err != nil { - return err - } - errPipe, err := command.StderrPipe() - if err != nil { - return err - } - - go func() { - defer inPipe.Close() - io.Copy(inPipe, s.stdin) - }() - - go io.Copy(s.stdout, outPipe) - go io.Copy(s.stderr, errPipe) - - return nil -} diff --git a/pkg/libcontainer/nsinit/tty_term.go b/pkg/libcontainer/nsinit/tty_term.go new file mode 100644 index 0000000000..fcbd085c82 --- /dev/null +++ b/pkg/libcontainer/nsinit/tty_term.go @@ -0,0 +1,55 @@ +package nsinit + +import ( + "io" + "os" + "os/exec" + + "github.com/dotcloud/docker/pkg/term" +) + +type TtyTerminal struct { + stdin io.Reader + stdout, stderr io.Writer + master *os.File + state *term.State +} + +func (t *TtyTerminal) Resize(h, w int) error { + return term.SetWinsize(t.master.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) +} + +func (t *TtyTerminal) SetMaster(master *os.File) { + t.master = master +} + +func (t *TtyTerminal) Attach(command *exec.Cmd) error { + go io.Copy(t.stdout, t.master) + go io.Copy(t.master, t.stdin) + + state, err := t.setupWindow(t.master, os.Stdin) + if err != nil { + command.Process.Kill() + return err + } + t.state = state + return err +} + +// SetupWindow gets the parent window size and sets the master +// pty 
to the current size and set the parents mode to RAW +func (t *TtyTerminal) setupWindow(master, parent *os.File) (*term.State, error) { + ws, err := term.GetWinsize(parent.Fd()) + if err != nil { + return nil, err + } + if err := term.SetWinsize(master.Fd(), ws); err != nil { + return nil, err + } + return term.SetRawTerminal(parent.Fd()) +} + +func (t *TtyTerminal) Close() error { + term.RestoreTerminal(os.Stdin.Fd(), t.state) + return t.master.Close() +} From b6b0dfdba7bda13d630217830423580c3152899d Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 30 Apr 2014 17:18:07 -0700 Subject: [PATCH 125/219] Export more functions from libcontainer Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/command.go | 16 +++------------- pkg/libcontainer/nsinit/exec.go | 11 +++++++++++ pkg/libcontainer/nsinit/execin.go | 2 +- pkg/libcontainer/nsinit/init.go | 15 +++++++-------- pkg/libcontainer/nsinit/unsupported.go | 4 ++++ 5 files changed, 26 insertions(+), 22 deletions(-) diff --git a/pkg/libcontainer/nsinit/command.go b/pkg/libcontainer/nsinit/command.go index 153a48ab59..3c7a0357c8 100644 --- a/pkg/libcontainer/nsinit/command.go +++ b/pkg/libcontainer/nsinit/command.go @@ -1,10 +1,11 @@ package nsinit import ( - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/system" "os" "os/exec" + + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/system" ) // CommandFactory takes the container's configuration and options passed by the @@ -34,14 +35,3 @@ func (c *DefaultCommandFactory) Create(container *libcontainer.Container, consol command.ExtraFiles = []*os.File{pipe} return command } - -// GetNamespaceFlags parses the container's Namespaces options to set the correct -// flags on clone, unshare, and setns -func GetNamespaceFlags(namespaces libcontainer.Namespaces) (flag int) { - for _, ns := range namespaces { - if ns.Enabled { - flag |= ns.Value - } - } - return flag -} diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index 64d35e51c3..45a2a8b76a 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -142,3 +142,14 @@ func DeletePid(path string) error { } return err } + +// GetNamespaceFlags parses the container's Namespaces options to set the correct +// flags on clone, unshare, and setns +func GetNamespaceFlags(namespaces libcontainer.Namespaces) (flag int) { + for _, ns := range namespaces { + if ns.Enabled { + flag |= ns.Value + } + } + return flag +} diff --git a/pkg/libcontainer/nsinit/execin.go b/pkg/libcontainer/nsinit/execin.go index c4ddb78ba0..8507d9bd11 100644 --- a/pkg/libcontainer/nsinit/execin.go +++ b/pkg/libcontainer/nsinit/execin.go @@ -82,7 +82,7 @@ func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []s os.Exit(state.Sys().(syscall.WaitStatus).ExitStatus()) } dropAndExec: - if err := finalizeNamespace(container); err != nil { + if err := FinalizeNamespace(container); err != nil { return -1, err } err = label.SetProcessLabel(processLabel) diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 52708f4300..02785bf146 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -54,23 +54,22 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol } label.Init() + if err := mount.InitializeMountNamespace(rootfs, consolePath, container); err != nil { return fmt.Errorf("setup mount namespace %s", err) } if err := 
system.Sethostname(container.Hostname); err != nil { return fmt.Errorf("sethostname %s", err) } - if err := finalizeNamespace(container); err != nil { + if err := FinalizeNamespace(container); err != nil { return fmt.Errorf("finalize namespace %s", err) } - if profile := container.Context["apparmor_profile"]; profile != "" { - if err := apparmor.ApplyProfile(os.Getpid(), profile); err != nil { - return err - } - } runtime.LockOSThread() + if err := apparmor.ApplyProfile(os.Getpid(), container.Context["apparmor_profile"]); err != nil { + return err + } if err := label.SetProcessLabel(container.Context["process_label"]); err != nil { return fmt.Errorf("set process label %s", err) } @@ -113,10 +112,10 @@ func setupNetwork(container *libcontainer.Container, context libcontainer.Contex return nil } -// finalizeNamespace drops the caps, sets the correct user +// FinalizeNamespace drops the caps, sets the correct user // and working dir, and closes any leaky file descriptors // before execing the command inside the namespace -func finalizeNamespace(container *libcontainer.Container) error { +func FinalizeNamespace(container *libcontainer.Container) error { if err := capabilities.DropCapabilities(container); err != nil { return fmt.Errorf("drop capabilities %s", err) } diff --git a/pkg/libcontainer/nsinit/unsupported.go b/pkg/libcontainer/nsinit/unsupported.go index 135c0ef314..6274870bfc 100644 --- a/pkg/libcontainer/nsinit/unsupported.go +++ b/pkg/libcontainer/nsinit/unsupported.go @@ -17,3 +17,7 @@ func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []s func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error { return libcontainer.ErrUnsupported } + +func GetNamespaceFlags(namespaces libcontainer.Namespaces) (flag int) { + return 0 +} From 176c49d7a9e5a81b6c80e18dea84864148360597 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 30 Apr 2014 17:55:15 -0700 Subject: [PATCH 126/219] Remove command factory and NsInit interface from libcontainer Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/command.go | 37 ------------- pkg/libcontainer/nsinit/exec.go | 49 ++++++++++++++--- pkg/libcontainer/nsinit/execin.go | 6 +-- pkg/libcontainer/nsinit/init.go | 20 ++++++- pkg/libcontainer/nsinit/nsinit.go | 22 -------- pkg/libcontainer/nsinit/nsinit/main.go | 74 ++++++++++---------------- pkg/libcontainer/nsinit/unsupported.go | 12 ----- 7 files changed, 94 insertions(+), 126 deletions(-) delete mode 100644 pkg/libcontainer/nsinit/command.go delete mode 100644 pkg/libcontainer/nsinit/nsinit.go diff --git a/pkg/libcontainer/nsinit/command.go b/pkg/libcontainer/nsinit/command.go deleted file mode 100644 index 3c7a0357c8..0000000000 --- a/pkg/libcontainer/nsinit/command.go +++ /dev/null @@ -1,37 +0,0 @@ -package nsinit - -import ( - "os" - "os/exec" - - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/system" -) - -// CommandFactory takes the container's configuration and options passed by the -// parent processes and creates an *exec.Cmd that will be used to fork/exec the -// namespaced init process -type CommandFactory interface { - Create(container *libcontainer.Container, console string, syncFd *os.File, args []string) *exec.Cmd -} - -type DefaultCommandFactory struct { - Root string -} - -// Create will return an exec.Cmd with the Cloneflags set to the proper namespaces -// defined on the container's configuration and use the 
current binary as the init with the -// args provided -func (c *DefaultCommandFactory) Create(container *libcontainer.Container, console string, pipe *os.File, args []string) *exec.Cmd { - // get our binary name from arg0 so we can always reexec ourself - command := exec.Command(os.Args[0], append([]string{ - "-console", console, - "-pipe", "3", - "-root", c.Root, - "init"}, args...)...) - - system.SetCloneFlags(command, uintptr(GetNamespaceFlags(container.Namespaces))) - command.Env = container.Env - command.ExtraFiles = []*os.File{pipe} - return command -} diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index 45a2a8b76a..5aa98af58e 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -20,7 +20,7 @@ import ( // Exec performes setup outside of a namespace so that a container can be // executed. Exec is a high level function for working with container namespaces. -func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, pidRoot string, args []string, startCallback func()) (int, error) { +func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath string, args []string, startCallback func()) (int, error) { var ( master *os.File console string @@ -42,7 +42,7 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, pidRoo term.SetMaster(master) } - command := ns.commandFactory.Create(container, console, syncPipe.child, args) + command := CreateCommand(container, console, rootfs, dataPath, os.Args[0], syncPipe.child, args) if err := term.Attach(command); err != nil { return -1, err } @@ -56,11 +56,11 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, pidRoo if err != nil { return -1, err } - if err := WritePid(pidRoot, command.Process.Pid, started); err != nil { + if err := WritePid(dataPath, command.Process.Pid, started); err != nil { command.Process.Kill() return -1, err } - defer DeletePid(pidRoot) + defer DeletePid(dataPath) // Do this before syncing with child so that no children // can escape the cgroup @@ -90,8 +90,45 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, pidRoo return -1, err } } - status := command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() - return status, err + return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil +} + +// CreateCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces +// defined on the container's configuration and use the current binary as the init with the +// args provided +// +// console: the /dev/console to setup inside the container +// init: the progam executed inside the namespaces +// root: the path to the container json file and information +// pipe: sync pipe to syncronize the parent and child processes +// args: the arguemnts to pass to the container to run as the user's program +func CreateCommand(container *libcontainer.Container, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd { + // get our binary name from arg0 so we can always reexec ourself + env := []string{ + "console=" + console, + "pipe=3", + "data_path=" + dataPath, + } + + /* + TODO: move user and wd into env + if user != "" { + env = append(env, "user="+user) + } + if workingDir != "" { + env = append(env, "wd="+workingDir) + } + */ + + command := exec.Command(init, append([]string{"init"}, args...)...) 
+ // make sure the process is executed inside the context of the rootfs + command.Dir = rootfs + command.Env = append(os.Environ(), env...) + + system.SetCloneFlags(command, uintptr(GetNamespaceFlags(container.Namespaces))) + command.ExtraFiles = []*os.File{pipe} + + return command } // SetupCgroups applies the cgroup restrictions to the process running in the contaienr based diff --git a/pkg/libcontainer/nsinit/execin.go b/pkg/libcontainer/nsinit/execin.go index 8507d9bd11..ac405e1a8d 100644 --- a/pkg/libcontainer/nsinit/execin.go +++ b/pkg/libcontainer/nsinit/execin.go @@ -16,7 +16,7 @@ import ( ) // ExecIn uses an existing pid and joins the pid's namespaces with the new command. -func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) { +func ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) { for _, nsv := range container.Namespaces { // skip the PID namespace on unshare because it it not supported if nsv.Key != "NEWPID" { @@ -25,7 +25,7 @@ func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []s } } } - fds, err := ns.getNsFds(nspid, container) + fds, err := getNsFds(nspid, container) closeFds := func() { for _, f := range fds { system.Closefd(f) @@ -95,7 +95,7 @@ dropAndExec: panic("unreachable") } -func (ns *linuxNs) getNsFds(pid int, container *libcontainer.Container) ([]uintptr, error) { +func getNsFds(pid int, container *libcontainer.Container) ([]uintptr, error) { fds := make([]uintptr, len(container.Namespaces)) for i, ns := range container.Namespaces { f, err := os.OpenFile(filepath.Join("/proc/", strconv.Itoa(pid), "ns", ns.File), os.O_RDONLY, 0) diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 02785bf146..faec12af32 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -6,6 +6,7 @@ import ( "fmt" "os" "runtime" + "strings" "syscall" "github.com/dotcloud/docker/pkg/apparmor" @@ -22,12 +23,18 @@ import ( // Init is the init process that first runs inside a new namespace to setup mounts, users, networking, // and other options required for the new container. 
-func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consolePath string, syncPipe *SyncPipe, args []string) error { +func Init(container *libcontainer.Container, uncleanRootfs, consolePath string, syncPipe *SyncPipe, args []string) error { rootfs, err := utils.ResolveRootfs(uncleanRootfs) if err != nil { return err } + // clear the current processes env and replace it with the environment + // defined on the container + if err := LoadContainerEnvironment(container); err != nil { + return err + } + // We always read this as it is a way to sync with the parent as well context, err := syncPipe.ReadFromParent() if err != nil { @@ -132,3 +139,14 @@ func FinalizeNamespace(container *libcontainer.Container) error { } return nil } + +func LoadContainerEnvironment(container *libcontainer.Container) error { + os.Clearenv() + for _, pair := range container.Env { + p := strings.SplitN(pair, "=", 2) + if err := os.Setenv(p[0], p[1]); err != nil { + return err + } + } + return nil +} diff --git a/pkg/libcontainer/nsinit/nsinit.go b/pkg/libcontainer/nsinit/nsinit.go deleted file mode 100644 index 506a39eaed..0000000000 --- a/pkg/libcontainer/nsinit/nsinit.go +++ /dev/null @@ -1,22 +0,0 @@ -package nsinit - -import "github.com/dotcloud/docker/pkg/libcontainer" - -// NsInit is an interface with the public facing methods to provide high level -// exec operations on a container -type NsInit interface { - Exec(container *libcontainer.Container, term Terminal, pidRoot string, args []string, startCallback func()) (int, error) - ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) - Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error -} - -type linuxNs struct { - root string - commandFactory CommandFactory -} - -func NewNsInit(command CommandFactory) NsInit { - return &linuxNs{ - commandFactory: command, - } -} diff --git a/pkg/libcontainer/nsinit/nsinit/main.go b/pkg/libcontainer/nsinit/nsinit/main.go index bcb0068ba9..6faa9c61cd 100644 --- a/pkg/libcontainer/nsinit/nsinit/main.go +++ b/pkg/libcontainer/nsinit/nsinit/main.go @@ -2,7 +2,6 @@ package main import ( "encoding/json" - "flag" "io/ioutil" "log" "os" @@ -14,76 +13,65 @@ import ( ) var ( - root, console, logs string - pipeFd int + dataPath = os.Getenv("data_path") + console = os.Getenv("console") + rawPipeFd = os.Getenv("pipe") ) -func registerFlags() { - flag.StringVar(&console, "console", "", "console (pty slave) path") - flag.IntVar(&pipeFd, "pipe", 0, "sync pipe fd") - flag.StringVar(&root, "root", ".", "root for storing configuration data") - flag.StringVar(&logs, "log", "none", "set stderr or a filepath to enable logging") - - flag.Parse() -} - func main() { - registerFlags() - - if flag.NArg() < 1 { - log.Fatalf("wrong number of arguments %d", flag.NArg()) + if len(os.Args) < 2 { + log.Fatalf("invalid number of arguments %d", len(os.Args)) } + container, err := loadContainer() if err != nil { - log.Fatalf("Unable to load container: %s", err) + log.Fatalf("unable to load container: %s", err) } - ns, err := newNsInit() - if err != nil { - log.Fatalf("Unable to initialize nsinit: %s", err) - } - - switch flag.Arg(0) { + switch os.Args[1] { case "exec": // this is executed outside of the namespace in the cwd - var exitCode int - nspid, err := readPid() - if err != nil { - if !os.IsNotExist(err) { - log.Fatalf("Unable to read pid: %s", err) - } + var nspid, exitCode int + if nspid, err = readPid(); err != nil && !os.IsNotExist(err) { + 
log.Fatalf("unable to read pid: %s", err) } + if nspid > 0 { - exitCode, err = ns.ExecIn(container, nspid, flag.Args()[1:]) + exitCode, err = nsinit.ExecIn(container, nspid, os.Args[2:]) } else { term := nsinit.NewTerminal(os.Stdin, os.Stdout, os.Stderr, container.Tty) - exitCode, err = ns.Exec(container, term, root, flag.Args()[1:], nil) + exitCode, err = nsinit.Exec(container, term, "", dataPath, os.Args[2:], nil) } + if err != nil { - log.Fatalf("Failed to exec: %s", err) + log.Fatalf("failed to exec: %s", err) } os.Exit(exitCode) case "init": // this is executed inside of the namespace to setup the container - cwd, err := os.Getwd() + // by default our current dir is always our rootfs + rootfs, err := os.Getwd() if err != nil { log.Fatal(err) } - if flag.NArg() < 2 { - log.Fatalf("wrong number of arguments %d", flag.NArg()) + + pipeFd, err := strconv.Atoi(rawPipeFd) + if err != nil { + log.Fatal(err) } syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(pipeFd)) if err != nil { - log.Fatalf("Unable to create sync pipe: %s", err) + log.Fatalf("unable to create sync pipe: %s", err) } - if err := ns.Init(container, cwd, console, syncPipe, flag.Args()[1:]); err != nil { - log.Fatalf("Unable to initialize for container: %s", err) + + if err := nsinit.Init(container, rootfs, console, syncPipe, os.Args[2:]); err != nil { + log.Fatalf("unable to initialize for container: %s", err) } default: - log.Fatalf("command not supported for nsinit %s", flag.Arg(0)) + log.Fatalf("command not supported for nsinit %s", os.Args[0]) } } func loadContainer() (*libcontainer.Container, error) { - f, err := os.Open(filepath.Join(root, "container.json")) + f, err := os.Open(filepath.Join(dataPath, "container.json")) if err != nil { return nil, err } @@ -97,7 +85,7 @@ func loadContainer() (*libcontainer.Container, error) { } func readPid() (int, error) { - data, err := ioutil.ReadFile(filepath.Join(root, "pid")) + data, err := ioutil.ReadFile(filepath.Join(dataPath, "pid")) if err != nil { return -1, err } @@ -107,7 +95,3 @@ func readPid() (int, error) { } return pid, nil } - -func newNsInit() (nsinit.NsInit, error) { - return nsinit.NewNsInit(&nsinit.DefaultCommandFactory{root}), nil -} diff --git a/pkg/libcontainer/nsinit/unsupported.go b/pkg/libcontainer/nsinit/unsupported.go index 6274870bfc..972d905cbb 100644 --- a/pkg/libcontainer/nsinit/unsupported.go +++ b/pkg/libcontainer/nsinit/unsupported.go @@ -6,18 +6,6 @@ import ( "github.com/dotcloud/docker/pkg/libcontainer" ) -func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, pidRoot string, args []string, startCallback func()) (int, error) { - return -1, libcontainer.ErrUnsupported -} - -func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) { - return -1, libcontainer.ErrUnsupported -} - -func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error { - return libcontainer.ErrUnsupported -} - func GetNamespaceFlags(namespaces libcontainer.Namespaces) (flag int) { return 0 } From 60e4276f5af360dd3292e22993c0c132a86edc2e Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 30 Apr 2014 18:20:01 -0700 Subject: [PATCH 127/219] Integrate new structure into docker's native driver This duplicates some of the Exec code but I think it it worth it because the native driver is more straight forward and does not have the complexity have handling the type issues for now. 
Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/execdriver/native/driver.go | 140 +++++++++++++++++-------- pkg/libcontainer/nsinit/exec.go | 23 ---- pkg/libcontainer/nsinit/pid.go | 28 +++++ pkg/libcontainer/nsinit/unsupported.go | 13 +++ pkg/system/unsupported.go | 5 + 5 files changed, 143 insertions(+), 66 deletions(-) create mode 100644 pkg/libcontainer/nsinit/pid.go diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go index 26c7d90474..c73eb0aec5 100644 --- a/daemon/execdriver/native/driver.go +++ b/daemon/execdriver/native/driver.go @@ -27,10 +27,7 @@ const ( func init() { execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { - var ( - container *libcontainer.Container - ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}) - ) + var container *libcontainer.Container f, err := os.Open(filepath.Join(args.Root, "container.json")) if err != nil { return err @@ -41,7 +38,7 @@ func init() { } f.Close() - cwd, err := os.Getwd() + rootfs, err := os.Getwd() if err != nil { return err } @@ -49,7 +46,7 @@ func init() { if err != nil { return err } - if err := ns.Init(container, cwd, args.Console, syncPipe, args.Args); err != nil { + if err := nsinit.Init(container, rootfs, args.Console, syncPipe, args.Args); err != nil { return err } return nil @@ -93,35 +90,87 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba d.activeContainers[c.ID] = &c.Cmd var ( - term nsinit.Terminal - factory = &dockerCommandFactory{c: c, driver: d} - pidRoot = filepath.Join(d.root, c.ID) - ns = nsinit.NewNsInit(factory) - args = append([]string{c.Entrypoint}, c.Arguments...) + master *os.File + console string + + dataPath = filepath.Join(d.root, c.ID) + args = append([]string{c.Entrypoint}, c.Arguments...) 
) if err := d.createContainerRoot(c.ID); err != nil { return -1, err } defer d.removeContainerRoot(c.ID) - if c.Tty { - term = &dockerTtyTerm{ - pipes: pipes, - } - } else { - term = &dockerStdTerm{ - pipes: pipes, - } - } - c.Terminal = term if err := d.writeContainerFile(container, c.ID); err != nil { return -1, err } - return ns.Exec(container, term, pidRoot, args, func() { - if startCallback != nil { - startCallback(c) + + // create a pipe so that we can syncronize with the namespaced process and + // pass the veth name to the child + syncPipe, err := nsinit.NewSyncPipe() + if err != nil { + return -1, err + } + term := getTerminal(c, pipes) + + if container.Tty { + master, console, err = system.CreateMasterAndConsole() + if err != nil { + return -1, err } - }) + term.SetMaster(master) + } + + setupCommand(d, c, container, console, syncPipe.Child(), args) + if err := term.Attach(&c.Cmd); err != nil { + return -1, err + } + defer term.Close() + + if err := c.Start(); err != nil { + return -1, err + } + + started, err := system.GetProcessStartTime(c.Process.Pid) + if err != nil { + return -1, err + } + if err := nsinit.WritePid(dataPath, c.Process.Pid, started); err != nil { + c.Process.Kill() + return -1, err + } + defer nsinit.DeletePid(dataPath) + + // Do this before syncing with child so that no children + // can escape the cgroup + cleaner, err := nsinit.SetupCgroups(container, c.Process.Pid) + if err != nil { + c.Process.Kill() + return -1, err + } + if cleaner != nil { + defer cleaner.Cleanup() + } + + if err := nsinit.InitializeNetworking(container, c.Process.Pid, syncPipe); err != nil { + c.Process.Kill() + return -1, err + } + + // Sync with child + syncPipe.Close() + + if startCallback != nil { + startCallback(c) + } + + if err := c.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); !ok { + return -1, err + } + } + return c.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil + } func (d *driver) Kill(p *execdriver.Command, sig int) error { @@ -234,35 +283,40 @@ func getEnv(key string, env []string) string { return "" } -type dockerCommandFactory struct { - c *execdriver.Command - driver *driver +func getTerminal(c *execdriver.Command, pipes *execdriver.Pipes) nsinit.Terminal { + var term nsinit.Terminal + if c.Tty { + term = &dockerTtyTerm{ + pipes: pipes, + } + } else { + term = &dockerStdTerm{ + pipes: pipes, + } + } + c.Terminal = term + return term } -// createCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces -// defined on the container's configuration and use the current binary as the init with the -// args provided -func (d *dockerCommandFactory) Create(container *libcontainer.Container, console string, syncFile *os.File, args []string) *exec.Cmd { +func setupCommand(d *driver, c *execdriver.Command, container *libcontainer.Container, console string, syncFile *os.File, args []string) { // we need to join the rootfs because nsinit will setup the rootfs and chroot - initPath := filepath.Join(d.c.Rootfs, d.c.InitPath) + initPath := filepath.Join(c.Rootfs, c.InitPath) - d.c.Path = d.driver.initPath - d.c.Args = append([]string{ + c.Path = d.initPath + c.Args = append([]string{ initPath, "-driver", DriverName, "-console", console, "-pipe", "3", - "-root", filepath.Join(d.driver.root, d.c.ID), + "-root", filepath.Join(d.root, c.ID), "--", }, args...) 
// set this to nil so that when we set the clone flags anything else is reset - d.c.SysProcAttr = nil - system.SetCloneFlags(&d.c.Cmd, uintptr(nsinit.GetNamespaceFlags(container.Namespaces))) - d.c.ExtraFiles = []*os.File{syncFile} + c.SysProcAttr = nil + system.SetCloneFlags(&c.Cmd, uintptr(nsinit.GetNamespaceFlags(container.Namespaces))) + c.ExtraFiles = []*os.File{syncFile} - d.c.Env = container.Env - d.c.Dir = d.c.Rootfs - - return &d.c.Cmd + c.Env = container.Env + c.Dir = c.Rootfs } diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index 5aa98af58e..078f277e80 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -3,11 +3,8 @@ package nsinit import ( - "fmt" - "io/ioutil" "os" "os/exec" - "path/filepath" "syscall" "github.com/dotcloud/docker/pkg/cgroups" @@ -160,26 +157,6 @@ func InitializeNetworking(container *libcontainer.Container, nspid int, pipe *Sy return pipe.SendToChild(context) } -// WritePid writes the namespaced processes pid to pid and it's start time -// to the path specified -func WritePid(path string, pid int, startTime string) error { - err := ioutil.WriteFile(filepath.Join(path, "pid"), []byte(fmt.Sprint(pid)), 0655) - if err != nil { - return err - } - return ioutil.WriteFile(filepath.Join(path, "start"), []byte(startTime), 0655) -} - -// DeletePid removes the pid and started file from disk when the container's process -// dies and the container is cleanly removed -func DeletePid(path string) error { - err := os.Remove(filepath.Join(path, "pid")) - if serr := os.Remove(filepath.Join(path, "start")); err == nil { - err = serr - } - return err -} - // GetNamespaceFlags parses the container's Namespaces options to set the correct // flags on clone, unshare, and setns func GetNamespaceFlags(namespaces libcontainer.Namespaces) (flag int) { diff --git a/pkg/libcontainer/nsinit/pid.go b/pkg/libcontainer/nsinit/pid.go new file mode 100644 index 0000000000..bba2f10e1b --- /dev/null +++ b/pkg/libcontainer/nsinit/pid.go @@ -0,0 +1,28 @@ +package nsinit + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// WritePid writes the namespaced processes pid to pid and it's start time +// to the path specified +func WritePid(path string, pid int, startTime string) error { + err := ioutil.WriteFile(filepath.Join(path, "pid"), []byte(fmt.Sprint(pid)), 0655) + if err != nil { + return err + } + return ioutil.WriteFile(filepath.Join(path, "start"), []byte(startTime), 0655) +} + +// DeletePid removes the pid and started file from disk when the container's process +// dies and the container is cleanly removed +func DeletePid(path string) error { + err := os.Remove(filepath.Join(path, "pid")) + if serr := os.Remove(filepath.Join(path, "start")); err == nil { + err = serr + } + return err +} diff --git a/pkg/libcontainer/nsinit/unsupported.go b/pkg/libcontainer/nsinit/unsupported.go index 972d905cbb..c99d881a59 100644 --- a/pkg/libcontainer/nsinit/unsupported.go +++ b/pkg/libcontainer/nsinit/unsupported.go @@ -3,9 +3,22 @@ package nsinit import ( + "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/libcontainer" ) +func Init(container *libcontainer.Container, uncleanRootfs, consolePath string, syncPipe *SyncPipe, args []string) error { + return libcontainer.ErrUnsupported +} + +func InitializeNetworking(container *libcontainer.Container, nspid int, pipe *SyncPipe) error { + return libcontainer.ErrUnsupported +} + +func SetupCgroups(container *libcontainer.Container, nspid int) 
(cgroups.ActiveCgroup, error) { + return nil, libcontainer.ErrUnsupported +} + func GetNamespaceFlags(namespaces libcontainer.Namespaces) (flag int) { return 0 } diff --git a/pkg/system/unsupported.go b/pkg/system/unsupported.go index 4ae2a488aa..96ebc858f5 100644 --- a/pkg/system/unsupported.go +++ b/pkg/system/unsupported.go @@ -3,6 +3,7 @@ package system import ( + "os" "os/exec" ) @@ -23,3 +24,7 @@ func GetClockTicks() int { // just return 100 return 100 } + +func CreateMasterAndConsole() (*os.File, string, error) { + return nil, "", ErrNotSupportedPlatform +} From aa9705f832d847d6e6ce76e19f3c952c194c167e Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 30 Apr 2014 18:24:47 -0700 Subject: [PATCH 128/219] Fix execin with environment and Enabled support Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/execin.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/libcontainer/nsinit/execin.go b/pkg/libcontainer/nsinit/execin.go index ac405e1a8d..608437f855 100644 --- a/pkg/libcontainer/nsinit/execin.go +++ b/pkg/libcontainer/nsinit/execin.go @@ -17,9 +17,15 @@ import ( // ExecIn uses an existing pid and joins the pid's namespaces with the new command. func ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) { + // clear the current processes env and replace it with the environment + // defined on the container + if err := LoadContainerEnvironment(container); err != nil { + return -1, err + } + for _, nsv := range container.Namespaces { // skip the PID namespace on unshare because it it not supported - if nsv.Key != "NEWPID" { + if nsv.Enabled && nsv.Key != "NEWPID" { if err := system.Unshare(nsv.Value); err != nil { return -1, err } From da0d6dbd7b5b429b79ae4ea22957e8a14b4ca1ec Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 30 Apr 2014 18:49:24 -0700 Subject: [PATCH 129/219] Make native driver use Exec func with different CreateCommand Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/execdriver/native/driver.go | 116 ++++++------------------- pkg/libcontainer/nsinit/create.go | 10 +++ pkg/libcontainer/nsinit/exec.go | 8 +- pkg/libcontainer/nsinit/nsinit/main.go | 2 +- pkg/libcontainer/nsinit/unsupported.go | 4 + 5 files changed, 46 insertions(+), 94 deletions(-) create mode 100644 pkg/libcontainer/nsinit/create.go diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go index c73eb0aec5..a397387f11 100644 --- a/daemon/execdriver/native/driver.go +++ b/daemon/execdriver/native/driver.go @@ -90,9 +90,6 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba d.activeContainers[c.ID] = &c.Cmd var ( - master *os.File - console string - dataPath = filepath.Join(d.root, c.ID) args = append([]string{c.Entrypoint}, c.Arguments...) 
) @@ -105,72 +102,36 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba return -1, err } - // create a pipe so that we can syncronize with the namespaced process and - // pass the veth name to the child - syncPipe, err := nsinit.NewSyncPipe() - if err != nil { - return -1, err - } term := getTerminal(c, pipes) - if container.Tty { - master, console, err = system.CreateMasterAndConsole() - if err != nil { - return -1, err + return nsinit.Exec(container, term, c.Rootfs, dataPath, args, func(container *libcontainer.Container, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd { + // we need to join the rootfs because nsinit will setup the rootfs and chroot + initPath := filepath.Join(c.Rootfs, c.InitPath) + + c.Path = d.initPath + c.Args = append([]string{ + initPath, + "-driver", DriverName, + "-console", console, + "-pipe", "3", + "-root", filepath.Join(d.root, c.ID), + "--", + }, args...) + + // set this to nil so that when we set the clone flags anything else is reset + c.SysProcAttr = nil + system.SetCloneFlags(&c.Cmd, uintptr(nsinit.GetNamespaceFlags(container.Namespaces))) + c.ExtraFiles = []*os.File{child} + + c.Env = container.Env + c.Dir = c.Rootfs + + return &c.Cmd + }, func() { + if startCallback != nil { + startCallback(c) } - term.SetMaster(master) - } - - setupCommand(d, c, container, console, syncPipe.Child(), args) - if err := term.Attach(&c.Cmd); err != nil { - return -1, err - } - defer term.Close() - - if err := c.Start(); err != nil { - return -1, err - } - - started, err := system.GetProcessStartTime(c.Process.Pid) - if err != nil { - return -1, err - } - if err := nsinit.WritePid(dataPath, c.Process.Pid, started); err != nil { - c.Process.Kill() - return -1, err - } - defer nsinit.DeletePid(dataPath) - - // Do this before syncing with child so that no children - // can escape the cgroup - cleaner, err := nsinit.SetupCgroups(container, c.Process.Pid) - if err != nil { - c.Process.Kill() - return -1, err - } - if cleaner != nil { - defer cleaner.Cleanup() - } - - if err := nsinit.InitializeNetworking(container, c.Process.Pid, syncPipe); err != nil { - c.Process.Kill() - return -1, err - } - - // Sync with child - syncPipe.Close() - - if startCallback != nil { - startCallback(c) - } - - if err := c.Wait(); err != nil { - if _, ok := err.(*exec.ExitError); !ok { - return -1, err - } - } - return c.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil - + }) } func (d *driver) Kill(p *execdriver.Command, sig int) error { @@ -297,26 +258,3 @@ func getTerminal(c *execdriver.Command, pipes *execdriver.Pipes) nsinit.Terminal c.Terminal = term return term } - -func setupCommand(d *driver, c *execdriver.Command, container *libcontainer.Container, console string, syncFile *os.File, args []string) { - // we need to join the rootfs because nsinit will setup the rootfs and chroot - initPath := filepath.Join(c.Rootfs, c.InitPath) - - c.Path = d.initPath - c.Args = append([]string{ - initPath, - "-driver", DriverName, - "-console", console, - "-pipe", "3", - "-root", filepath.Join(d.root, c.ID), - "--", - }, args...) 
- - // set this to nil so that when we set the clone flags anything else is reset - c.SysProcAttr = nil - system.SetCloneFlags(&c.Cmd, uintptr(nsinit.GetNamespaceFlags(container.Namespaces))) - c.ExtraFiles = []*os.File{syncFile} - - c.Env = container.Env - c.Dir = c.Rootfs -} diff --git a/pkg/libcontainer/nsinit/create.go b/pkg/libcontainer/nsinit/create.go new file mode 100644 index 0000000000..d5cba464d2 --- /dev/null +++ b/pkg/libcontainer/nsinit/create.go @@ -0,0 +1,10 @@ +package nsinit + +import ( + "os" + "os/exec" + + "github.com/dotcloud/docker/pkg/libcontainer" +) + +type CreateCommand func(container *libcontainer.Container, console, rootfs, dataPath, init string, childPipe *os.File, args []string) *exec.Cmd diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index 078f277e80..8886efeb32 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -17,7 +17,7 @@ import ( // Exec performes setup outside of a namespace so that a container can be // executed. Exec is a high level function for working with container namespaces. -func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath string, args []string, startCallback func()) (int, error) { +func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) { var ( master *os.File console string @@ -39,7 +39,7 @@ func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath str term.SetMaster(master) } - command := CreateCommand(container, console, rootfs, dataPath, os.Args[0], syncPipe.child, args) + command := createCommand(container, console, rootfs, dataPath, os.Args[0], syncPipe.child, args) if err := term.Attach(command); err != nil { return -1, err } @@ -90,7 +90,7 @@ func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath str return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil } -// CreateCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces +// DefaultCreateCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces // defined on the container's configuration and use the current binary as the init with the // args provided // @@ -99,7 +99,7 @@ func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath str // root: the path to the container json file and information // pipe: sync pipe to syncronize the parent and child processes // args: the arguemnts to pass to the container to run as the user's program -func CreateCommand(container *libcontainer.Container, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd { +func DefaultCreateCommand(container *libcontainer.Container, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd { // get our binary name from arg0 so we can always reexec ourself env := []string{ "console=" + console, diff --git a/pkg/libcontainer/nsinit/nsinit/main.go b/pkg/libcontainer/nsinit/nsinit/main.go index 6faa9c61cd..b5325d40b3 100644 --- a/pkg/libcontainer/nsinit/nsinit/main.go +++ b/pkg/libcontainer/nsinit/nsinit/main.go @@ -39,7 +39,7 @@ func main() { exitCode, err = nsinit.ExecIn(container, nspid, os.Args[2:]) } else { term := nsinit.NewTerminal(os.Stdin, os.Stdout, os.Stderr, container.Tty) - exitCode, err = nsinit.Exec(container, term, "", dataPath, os.Args[2:], nil) + exitCode, err = nsinit.Exec(container, term, "", dataPath, os.Args[2:], 
nsinit.DefaultCreateCommand, nil) } if err != nil { diff --git a/pkg/libcontainer/nsinit/unsupported.go b/pkg/libcontainer/nsinit/unsupported.go index c99d881a59..f213f2ec88 100644 --- a/pkg/libcontainer/nsinit/unsupported.go +++ b/pkg/libcontainer/nsinit/unsupported.go @@ -7,6 +7,10 @@ import ( "github.com/dotcloud/docker/pkg/libcontainer" ) +func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) { + return -1, libcontainer.ErrUnsupported +} + func Init(container *libcontainer.Container, uncleanRootfs, consolePath string, syncPipe *SyncPipe, args []string) error { return libcontainer.ErrUnsupported } From 93f8e277de63b4bcf6dc82b8c4b7abf81d614355 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Wed, 30 Apr 2014 21:51:03 -0400 Subject: [PATCH 130/219] docker save: typo reference image, not container Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- server/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/server.go b/server/server.go index 0ab0a4a00b..51dd24b3fe 100644 --- a/server/server.go +++ b/server/server.go @@ -340,7 +340,7 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status { // out is the writer where the images are written to. func (srv *Server) ImageExport(job *engine.Job) engine.Status { if len(job.Args) != 1 { - return job.Errorf("Usage: %s CONTAINER\n", job.Name) + return job.Errorf("Usage: %s IMAGE\n", job.Name) } name := job.Args[0] // get image json From d0bee7939482b982462c5848f24b2e5e9ad897ea Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 30 Apr 2014 18:52:15 -0700 Subject: [PATCH 131/219] Remove container.json from readme No need to duplicate this information when we already have a container.json file in the root of libcontainer Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/README.md | 151 +------------------------------------ 1 file changed, 1 insertion(+), 150 deletions(-) diff --git a/pkg/libcontainer/README.md b/pkg/libcontainer/README.md index 31031b26cd..8e89153bd7 100644 --- a/pkg/libcontainer/README.md +++ b/pkg/libcontainer/README.md @@ -13,160 +13,11 @@ a `container.json` file is placed with the runtime configuration for how the pro should be contained and ran. Environment, networking, and different capabilities for the process are specified in this file. The configuration is used for each process executed inside the container. 
-Sample `container.json` file: -```json -{ - "mounts" : [ - { - "type" : "devtmpfs" - } - ], - "tty" : true, - "environment" : [ - "HOME=/", - "PATH=PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin", - "container=docker", - "TERM=xterm-256color" - ], - "hostname" : "koye", - "cgroups" : { - "parent" : "docker", - "name" : "docker-koye" - }, - "capabilities_mask" : [ - { - "value" : 8, - "key" : "SETPCAP", - "enabled" : false - }, - { - "enabled" : false, - "value" : 16, - "key" : "SYS_MODULE" - }, - { - "value" : 17, - "key" : "SYS_RAWIO", - "enabled" : false - }, - { - "key" : "SYS_PACCT", - "value" : 20, - "enabled" : false - }, - { - "value" : 21, - "key" : "SYS_ADMIN", - "enabled" : false - }, - { - "value" : 23, - "key" : "SYS_NICE", - "enabled" : false - }, - { - "value" : 24, - "key" : "SYS_RESOURCE", - "enabled" : false - }, - { - "key" : "SYS_TIME", - "value" : 25, - "enabled" : false - }, - { - "enabled" : false, - "value" : 26, - "key" : "SYS_TTY_CONFIG" - }, - { - "key" : "AUDIT_WRITE", - "value" : 29, - "enabled" : false - }, - { - "value" : 30, - "key" : "AUDIT_CONTROL", - "enabled" : false - }, - { - "enabled" : false, - "key" : "MAC_OVERRIDE", - "value" : 32 - }, - { - "enabled" : false, - "key" : "MAC_ADMIN", - "value" : 33 - }, - { - "key" : "NET_ADMIN", - "value" : 12, - "enabled" : false - }, - { - "value" : 27, - "key" : "MKNOD", - "enabled" : true - } - ], - "networks" : [ - { - "mtu" : 1500, - "address" : "127.0.0.1/0", - "type" : "loopback", - "gateway" : "localhost" - }, - { - "mtu" : 1500, - "address" : "172.17.42.2/16", - "type" : "veth", - "context" : { - "bridge" : "docker0", - "prefix" : "veth" - }, - "gateway" : "172.17.42.1" - } - ], - "namespaces" : [ - { - "key" : "NEWNS", - "value" : 131072, - "enabled" : true, - "file" : "mnt" - }, - { - "key" : "NEWUTS", - "value" : 67108864, - "enabled" : true, - "file" : "uts" - }, - { - "enabled" : true, - "file" : "ipc", - "key" : "NEWIPC", - "value" : 134217728 - }, - { - "file" : "pid", - "enabled" : true, - "value" : 536870912, - "key" : "NEWPID" - }, - { - "enabled" : true, - "file" : "net", - "key" : "NEWNET", - "value" : 1073741824 - } - ] -} -``` +See the `container.json` file for what the configuration should look like. Using this configuration and the current directory holding the rootfs for a process, one can use libcontainer to exec the container. Running the life of the namespace, a `pid` file is written to the current directory with the pid of the namespaced process to the external world. A client can use this pid to wait, kill, or perform other operation with the container. If a user tries to run a new process inside an existing container with a live namespace, the namespace will be joined by the new process. - You may also specify an alternate root place where the `container.json` file is read and where the `pid` file will be saved. 
#### nsinit From 8ae53ef167d684ed41fd9bd1a04f7a70a1ad20b6 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 1 May 2014 12:41:57 +1000 Subject: [PATCH 132/219] add page_description and page_keywords md meta to mkdocs html template Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/theme/mkdocs/base.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/theme/mkdocs/base.html b/docs/theme/mkdocs/base.html index 5371569596..ca418b3cd7 100644 --- a/docs/theme/mkdocs/base.html +++ b/docs/theme/mkdocs/base.html @@ -4,7 +4,8 @@ - {% if page_description %}{% endif %} + {% if meta.page_description %}{% endif %} + {% if meta.page_keywords %}{% endif %} {% if site_author %}{% endif %} {% if canonical_url %}{% endif %} @@ -14,7 +15,6 @@ {% if page_title != '**HIDDEN** - '+site_name %}{{ page_title }}{% else %}{{ site_name }}{% endif %} - {% if page_title != '**HIDDEN** - Docker' %}{{ page_title }}{% else %}{{ site_name }}{% endif %} --
@@ -114,111 +72,7 @@
     {% block body %}{% endblock %}
-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- - - - diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 7971c56d9e..29b926816c 100755 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -119,10 +119,10 @@ pages: # - ['static_files/README.md', 'static_files', 'README'] - ['terms/index.md', '**HIDDEN**'] -- ['terms/layer.md', '**HIDDEN**', 'layer'] -- ['terms/index.md', '**HIDDEN**', 'Home'] -- ['terms/registry.md', '**HIDDEN**', 'registry'] -- ['terms/container.md', '**HIDDEN**', 'container'] -- ['terms/repository.md', '**HIDDEN**', 'repository'] -- ['terms/filesystem.md', '**HIDDEN**', 'filesystem'] -- ['terms/image.md', '**HIDDEN**', 'image'] +- ['terms/layer.md', '**HIDDEN**'] +- ['terms/index.md', '**HIDDEN**'] +- ['terms/registry.md', '**HIDDEN**'] +- ['terms/container.md', '**HIDDEN**'] +- ['terms/repository.md', '**HIDDEN**'] +- ['terms/filesystem.md', '**HIDDEN**'] +- ['terms/image.md', '**HIDDEN**'] diff --git a/docs/pr4923.patch b/docs/pr4923.patch deleted file mode 100644 index ef420520f7..0000000000 --- a/docs/pr4923.patch +++ /dev/null @@ -1,12836 +0,0 @@ -diff --git a/docs/sources/articles.md b/docs/sources/articles.md -index da5a2d2..48654b0 100644 ---- a/docs/sources/articles.md -+++ b/docs/sources/articles.md -@@ -1,8 +1,7 @@ --# Articles - --## Contents: -+# Articles - --- [Docker Security](security/) --- [Create a Base Image](baseimages/) --- [Runtime Metrics](runmetrics/) -+- [Docker Security](security/) -+- [Create a Base Image](baseimages/) -+- [Runtime Metrics](runmetrics/) - -diff --git a/docs/sources/articles/runmetrics.md b/docs/sources/articles/runmetrics.md -index 1a832d1..2372282 100644 ---- a/docs/sources/articles/runmetrics.md -+++ b/docs/sources/articles/runmetrics.md -@@ -56,7 +56,7 @@ ID or long ID of the container. If a container shows up as ae836c95b4c3 - in `docker ps`, its long ID might be something like - `ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79`{.docutils - .literal}. You can look it up with `docker inspect` --or `docker ps -notrunc`. -+or `docker ps --no-trunc`. - - Putting everything together to look at the memory metrics for a Docker - container, take a look at -diff --git a/docs/sources/articles/security.md b/docs/sources/articles/security.md -index 23d595f..13917f0 100644 ---- a/docs/sources/articles/security.md -+++ b/docs/sources/articles/security.md -@@ -5,7 +5,7 @@ page_keywords: Docker, Docker documentation, security - # Docker Security - - > *Adapted from* [Containers & Docker: How Secure are --> They?](blogsecurity) -+> They?](http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/) - - There are three major areas to consider when reviewing Docker security: - -@@ -255,4 +255,4 @@ with Docker, since everything is provided by the kernel anyway. - - For more context and especially for comparisons with VMs and other - container systems, please also see the [original blog --post](blogsecurity). -+post](http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/). 
-diff --git a/docs/sources/contributing.md b/docs/sources/contributing.md -index b311d13..0a31cb2 100644 ---- a/docs/sources/contributing.md -+++ b/docs/sources/contributing.md -@@ -1,7 +1,6 @@ --# Contributing - --## Contents: -+# Contributing - --- [Contributing to Docker](contributing/) --- [Setting Up a Dev Environment](devenvironment/) -+- [Contributing to Docker](contributing/) -+- [Setting Up a Dev Environment](devenvironment/) - -diff --git a/docs/sources/contributing/devenvironment.md b/docs/sources/contributing/devenvironment.md -index 3b77acf..76df680 100644 ---- a/docs/sources/contributing/devenvironment.md -+++ b/docs/sources/contributing/devenvironment.md -@@ -10,7 +10,7 @@ used for all tests, builds and releases. The standard development - environment defines all build dependencies: system libraries and - binaries, go environment, go dependencies, etc. - --## Install Docker -+## Step 1: Install Docker - - Docker’s build environment itself is a Docker container, so the first - step is to install Docker on your system. -@@ -20,7 +20,7 @@ system](https://docs.docker.io/en/latest/installation/). Make sure you - have a working, up-to-date docker installation, then continue to the - next step. - --## Install tools used for this tutorial -+## Step 2: Install tools used for this tutorial - - Install `git`; honest, it’s very good. You can use - other ways to get the Docker source, but they’re not anywhere near as -@@ -30,7 +30,7 @@ Install `make`. This tutorial uses our base Makefile - to kick off the docker containers in a repeatable and consistent way. - Again, you can do it in other ways but you need to do more work. - --## Check out the Source -+## Step 3: Check out the Source - - git clone http://git@github.com/dotcloud/docker - cd docker -@@ -38,7 +38,7 @@ Again, you can do it in other ways but you need to do more work. - To checkout a different revision just use `git checkout`{.docutils - .literal} with the name of branch or revision number. - --## Build the Environment -+## Step 4: Build the Environment - - This following command will build a development environment using the - Dockerfile in the current directory. Essentially, it will install all -@@ -50,7 +50,7 @@ This command will take some time to complete when you first execute it. - If the build is successful, congratulations! You have produced a clean - build of docker, neatly encapsulated in a standard build environment. - --## Build the Docker Binary -+## Step 5: Build the Docker Binary - - To create the Docker binary, run this command: - -@@ -73,7 +73,7 @@ Note - Its safer to run the tests below before swapping your hosts docker - binary. - --## Run the Tests -+## Step 5: Run the Tests - - To execute the test cases, run this command: - -@@ -114,7 +114,7 @@ eg. 
- - > TESTFLAGS=’-run \^TestBuild\$’ make test - --## Use Docker -+## Step 6: Use Docker - - You can run an interactive session in the newly built container: - -@@ -122,7 +122,7 @@ You can run an interactive session in the newly built container: - - # type 'exit' or Ctrl-D to exit - --## Build And View The Documentation -+## Extra Step: Build and view the Documentation - - If you want to read the documentation from a local website, or are - making changes to it, you can build the documentation and then serve it -diff --git a/docs/sources/examples.md b/docs/sources/examples.md -index 98b3d25..81ad1de 100644 ---- a/docs/sources/examples.md -+++ b/docs/sources/examples.md -@@ -1,25 +1,23 @@ - - # Examples - --## Introduction: -- - Here are some examples of how to use Docker to create running processes, - starting from a very simple *Hello World* and progressing to more - substantial services like those which you might find in production. - --## Contents: -- --- [Check your Docker install](hello_world/) --- [Hello World](hello_world/#hello-world) --- [Hello World Daemon](hello_world/#hello-world-daemon) --- [Node.js Web App](nodejs_web_app/) --- [Redis Service](running_redis_service/) --- [SSH Daemon Service](running_ssh_service/) --- [CouchDB Service](couchdb_data_volumes/) --- [PostgreSQL Service](postgresql_service/) --- [Building an Image with MongoDB](mongodb/) --- [Riak Service](running_riak_service/) --- [Using Supervisor with Docker](using_supervisord/) --- [Process Management with CFEngine](cfengine_process_management/) --- [Python Web App](python_web_app/) -+- [Check your Docker install](hello_world/) -+- [Hello World](hello_world/#hello-world) -+- [Hello World Daemon](hello_world/#hello-world-daemon) -+- [Node.js Web App](nodejs_web_app/) -+- [Redis Service](running_redis_service/) -+- [SSH Daemon Service](running_ssh_service/) -+- [CouchDB Service](couchdb_data_volumes/) -+- [PostgreSQL Service](postgresql_service/) -+- [Building an Image with MongoDB](mongodb/) -+- [Riak Service](running_riak_service/) -+- [Using Supervisor with Docker](using_supervisord/) -+- [Process Management with CFEngine](cfengine_process_management/) -+- [Python Web App](python_web_app/) -+- [Apt-Cacher-ng Service](apt-cacher-ng/) -+- [Running Docker with https](https/) - -diff --git a/docs/sources/examples/couchdb_data_volumes.md b/docs/sources/examples/couchdb_data_volumes.md -index c4d478e..9665bb0 100644 ---- a/docs/sources/examples/couchdb_data_volumes.md -+++ b/docs/sources/examples/couchdb_data_volumes.md -@@ -11,6 +11,8 @@ Note - install*](../hello_world/#running-examples). - - **If you don’t like sudo** then see [*Giving non-root - access*](../../installation/binaries/#dockergroup) -+- **If you’re using OS X or docker via TCP** then you shouldn’t use -+ sudo - - Here’s an example of using data volumes to share the same data between - two CouchDB containers. This could be used for hot upgrades, testing -diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md -index 8f2ae58..a9b0d7d 100644 ---- a/docs/sources/examples/hello_world.md -+++ b/docs/sources/examples/hello_world.md -@@ -2,7 +2,7 @@ page_title: Hello world example - page_description: A simple hello world example with Docker - page_keywords: docker, example, hello world - --# Check your Docker installation -+# Check your Docker install - - This guide assumes you have a working installation of Docker. 
To check - your Docker install, run the following command: -@@ -18,7 +18,7 @@ privileges to access docker on your machine. - Please refer to [*Installation*](../../installation/#installation-list) - for installation instructions. - --## Hello World -+# Hello World - - Note - -@@ -27,6 +27,8 @@ Note - install*](#running-examples). - - **If you don’t like sudo** then see [*Giving non-root - access*](../../installation/binaries/#dockergroup) -+- **If you’re using OS X or docker via TCP** then you shouldn’t use -+ sudo - - This is the most basic example available for using Docker. - -@@ -59,7 +61,9 @@ standard out. - - See the example in action - --## Hello World Daemon -+* * * * * -+ -+# Hello World Daemon - - Note - -@@ -68,6 +72,8 @@ Note - install*](#running-examples). - - **If you don’t like sudo** then see [*Giving non-root - access*](../../installation/binaries/#dockergroup) -+- **If you’re using OS X or docker via TCP** then you shouldn’t use -+ sudo - - And now for the most boring daemon ever written! - -@@ -77,7 +83,7 @@ continue to do this until we stop it. - - **Steps:** - -- CONTAINER_ID=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done") -+ container_id=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done") - - We are going to run a simple hello world daemon in a new container made - from the `ubuntu` image. -@@ -89,31 +95,31 @@ from the `ubuntu` image. - - **“while true; do echo hello world; sleep 1; done”** is the mini - script we want to run, that will just print hello world once a - second until we stop it. --- **\$CONTAINER\_ID** the output of the run command will return a -+- **\$container\_id** the output of the run command will return a - container id, we can use in future commands to see what is going on - with this process. - - - -- sudo docker logs $CONTAINER_ID -+ sudo docker logs $container_id - - Check the logs make sure it is working correctly. - - - **“docker logs**” This will return the logs for a container --- **\$CONTAINER\_ID** The Id of the container we want the logs for. -+- **\$container\_id** The Id of the container we want the logs for. - - - -- sudo docker attach -sig-proxy=false $CONTAINER_ID -+ sudo docker attach --sig-proxy=false $container_id - - Attach to the container to see the results in real-time. - - - **“docker attach**” This will allow us to attach to a background - process to see what is going on. --- **“-sig-proxy=false”** Do not forward signals to the container; -+- **“–sig-proxy=false”** Do not forward signals to the container; - allows us to exit the attachment using Control-C without stopping - the container. --- **\$CONTAINER\_ID** The Id of the container we want to attach too. -+- **\$container\_id** The Id of the container we want to attach too. - - Exit from the container attachment by pressing Control-C. - -@@ -125,12 +131,12 @@ Check the process list to make sure it is running. - - - -- sudo docker stop $CONTAINER_ID -+ sudo docker stop $container_id - - Stop the container, since we don’t need it anymore. - - - **“docker stop”** This stops a container --- **\$CONTAINER\_ID** The Id of the container we want to stop. -+- **\$container\_id** The Id of the container we want to stop. - - - -diff --git a/docs/sources/examples/mongodb.md b/docs/sources/examples/mongodb.md -index 6612bf3..3708c18 100644 ---- a/docs/sources/examples/mongodb.md -+++ b/docs/sources/examples/mongodb.md -@@ -11,6 +11,8 @@ Note - install*](../hello_world/#running-examples). 
- - **If you don’t like sudo** then see [*Giving non-root - access*](../../installation/binaries/#dockergroup) -+- **If you’re using OS X or docker via TCP** then you shouldn’t use -+ sudo - - The goal of this example is to show how you can build your own Docker - images with MongoDB pre-installed. We will do that by constructing a -@@ -43,7 +45,7 @@ we’ll divert `/sbin/initctl` to - - # Hack for initctl not being available in Ubuntu - RUN dpkg-divert --local --rename --add /sbin/initctl -- RUN ln -s /bin/true /sbin/initctl -+ RUN ln -sf /bin/true /sbin/initctl - - Afterwards we’ll be able to update our apt repositories and install - MongoDB -@@ -75,10 +77,10 @@ Now you should be able to run `mongod` as a daemon - and be able to connect on the local port! - - # Regular style -- MONGO_ID=$(sudo docker run -d /mongodb) -+ MONGO_ID=$(sudo docker run -P -d /mongodb) - - # Lean and mean -- MONGO_ID=$(sudo docker run -d /mongodb --noprealloc --smallfiles) -+ MONGO_ID=$(sudo docker run -P -d /mongodb --noprealloc --smallfiles) - - # Check the logs out - sudo docker logs $MONGO_ID -diff --git a/docs/sources/examples/nodejs_web_app.md b/docs/sources/examples/nodejs_web_app.md -index 8d692d8..59e6c77 100644 ---- a/docs/sources/examples/nodejs_web_app.md -+++ b/docs/sources/examples/nodejs_web_app.md -@@ -11,6 +11,8 @@ Note - install*](../hello_world/#running-examples). - - **If you don’t like sudo** then see [*Giving non-root - access*](../../installation/binaries/#dockergroup) -+- **If you’re using OS X or docker via TCP** then you shouldn’t use -+ sudo - - The goal of this example is to show you how you can build your own - Docker images from a parent image using a `Dockerfile`{.docutils -@@ -82,7 +84,7 @@ CentOS, we’ll use the instructions from the [Node.js - wiki](https://github.com/joyent/node/wiki/Installing-Node.js-via-package-manager#rhelcentosscientific-linux-6): - - # Enable EPEL for Node.js -- RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm -+ RUN rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm - # Install Node.js and npm - RUN yum install -y npm - -diff --git a/docs/sources/examples/postgresql_service.md b/docs/sources/examples/postgresql_service.md -index 211dcb2..b87d121 100644 ---- a/docs/sources/examples/postgresql_service.md -+++ b/docs/sources/examples/postgresql_service.md -@@ -11,6 +11,8 @@ Note - install*](../hello_world/#running-examples). - - **If you don’t like sudo** then see [*Giving non-root - access*](../../installation/binaries/#dockergroup) -+- **If you’re using OS X or docker via TCP** then you shouldn’t use -+ sudo - - ## Installing PostgreSQL on Docker - -@@ -34,7 +36,7 @@ suitably secure. - - # Add the PostgreSQL PGP key to verify their Debian packages. - # It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc -- RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 -+ RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 - - # Add PostgreSQL's repository. It contains the most recent stable release - # of PostgreSQL, ``9.3``. -@@ -85,7 +87,7 @@ Build an image from the Dockerfile assign it a name. - - And run the PostgreSQL server container (in the foreground): - -- $ sudo docker run -rm -P -name pg_test eg_postgresql -+ $ sudo docker run --rm -P --name pg_test eg_postgresql - - There are 2 ways to connect to the PostgreSQL server. 
We can use [*Link - Containers*](../../use/working_with_links_names/#working-with-links-names), -@@ -93,17 +95,17 @@ or we can access it from our host (or the network). - - Note - --The `-rm` removes the container and its image when -+The `--rm` removes the container and its image when - the container exists successfully. - - ### Using container linking - - Containers can be linked to another container’s ports directly using --`-link remote_name:local_alias` in the client’s -+`--link remote_name:local_alias` in the client’s - `docker run`. This will set a number of environment - variables that can then be used to connect: - -- $ sudo docker run -rm -t -i -link pg_test:pg eg_postgresql bash -+ $ sudo docker run --rm -t -i --link pg_test:pg eg_postgresql bash - - postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password - -@@ -145,7 +147,7 @@ prompt, you can create a table and populate it. - You can use the defined volumes to inspect the PostgreSQL log files and - to backup your configuration and data: - -- docker run -rm --volumes-from pg_test -t -i busybox sh -+ docker run --rm --volumes-from pg_test -t -i busybox sh - - / # ls - bin etc lib linuxrc mnt proc run sys usr -diff --git a/docs/sources/examples/python_web_app.md b/docs/sources/examples/python_web_app.md -index b5854a4..8c0d783 100644 ---- a/docs/sources/examples/python_web_app.md -+++ b/docs/sources/examples/python_web_app.md -@@ -11,6 +11,8 @@ Note - install*](../hello_world/#running-examples). - - **If you don’t like sudo** then see [*Giving non-root - access*](../../installation/binaries/#dockergroup) -+- **If you’re using OS X or docker via TCP** then you shouldn’t use -+ sudo - - While using Dockerfiles is the preferred way to create maintainable and - repeatable images, its useful to know how you can try things out and -@@ -52,7 +54,7 @@ the `$URL` variable. The container is given a name - While this example is simple, you could run any number of interactive - commands, try things out, and then exit when you’re done. - -- $ sudo docker run -i -t -name pybuilder_run shykes/pybuilder bash -+ $ sudo docker run -i -t --name pybuilder_run shykes/pybuilder bash - - $$ URL=http://github.com/shykes/helloflask/archive/master.tar.gz - $$ /usr/local/bin/buildapp $URL -diff --git a/docs/sources/examples/running_redis_service.md b/docs/sources/examples/running_redis_service.md -index 81114e6..c0511a9 100644 ---- a/docs/sources/examples/running_redis_service.md -+++ b/docs/sources/examples/running_redis_service.md -@@ -11,6 +11,8 @@ Note - install*](../hello_world/#running-examples). - - **If you don’t like sudo** then see [*Giving non-root - access*](../../installation/binaries/#dockergroup) -+- **If you’re using OS X or docker via TCP** then you shouldn’t use -+ sudo - - Very simple, no frills, Redis service attached to a web application - using a link. -@@ -20,11 +22,11 @@ using a link. - Firstly, we create a `Dockerfile` for our new Redis - image. - -- FROM ubuntu:12.10 -- RUN apt-get update -- RUN apt-get -y install redis-server -+ FROM debian:jessie -+ RUN apt-get update && apt-get install -y redis-server - EXPOSE 6379 - ENTRYPOINT ["/usr/bin/redis-server"] -+ CMD ["--bind", "0.0.0.0"] - - Next we build an image from our `Dockerfile`. - Replace `` with your own user name. -@@ -48,7 +50,7 @@ database. - ## Create your web application container - - Next we can create a container for our application. 
We’re going to use --the `-link` flag to create a link to the -+the `--link` flag to create a link to the - `redis` container we’ve just created with an alias - of `db`. This will create a secure tunnel to the - `redis` container and expose the Redis instance -diff --git a/docs/sources/examples/running_riak_service.md b/docs/sources/examples/running_riak_service.md -index e7171d8..c1b95e7 100644 ---- a/docs/sources/examples/running_riak_service.md -+++ b/docs/sources/examples/running_riak_service.md -@@ -11,6 +11,8 @@ Note - install*](../hello_world/#running-examples). - - **If you don’t like sudo** then see [*Giving non-root - access*](../../installation/binaries/#dockergroup) -+- **If you’re using OS X or docker via TCP** then you shouldn’t use -+ sudo - - The goal of this example is to show you how to build a Docker image with - Riak pre-installed. -@@ -85,7 +87,7 @@ Almost there. Next, we add a hack to get us by the lack of - # Hack for initctl - # See: https://github.com/dotcloud/docker/issues/1024 - RUN dpkg-divert --local --rename --add /sbin/initctl -- RUN ln -s /bin/true /sbin/initctl -+ RUN ln -sf /bin/true /sbin/initctl - - Then, we expose the Riak Protocol Buffers and HTTP interfaces, along - with SSH: -diff --git a/docs/sources/examples/running_ssh_service.md b/docs/sources/examples/running_ssh_service.md -index 112b9fa..2a0acfa 100644 ---- a/docs/sources/examples/running_ssh_service.md -+++ b/docs/sources/examples/running_ssh_service.md -@@ -4,12 +4,15 @@ page_keywords: docker, example, package installation, networking - - # SSH Daemon Service - --> **Note:** --> - This example assumes you have Docker running in daemon mode. For --> more information please see [*Check your Docker --> install*](../hello_world/#running-examples). --> - **If you don’t like sudo** then see [*Giving non-root --> access*](../../installation/binaries/#dockergroup) -+Note -+ -+- This example assumes you have Docker running in daemon mode. For -+ more information please see [*Check your Docker -+ install*](../hello_world/#running-examples). -+- **If you don’t like sudo** then see [*Giving non-root -+ access*](../../installation/binaries/#dockergroup) -+- **If you’re using OS X or docker via TCP** then you shouldn’t use -+ sudo - - The following Dockerfile sets up an sshd service in a container that you - can use to connect to and inspect other container’s volumes, or to get -@@ -35,12 +38,12 @@ quick access to a test container. - - Build the image using: - -- $ sudo docker build -rm -t eg_sshd . -+ $ sudo docker build -t eg_sshd . - - Then run it. You can then use `docker port` to find - out what host port the container’s port 22 is mapped to: - -- $ sudo docker run -d -P -name test_sshd eg_sshd -+ $ sudo docker run -d -P --name test_sshd eg_sshd - $ sudo docker port test_sshd 22 - 0.0.0.0:49154 - -diff --git a/docs/sources/examples/using_supervisord.md b/docs/sources/examples/using_supervisord.md -index d64b300..8d6e796 100644 ---- a/docs/sources/examples/using_supervisord.md -+++ b/docs/sources/examples/using_supervisord.md -@@ -11,6 +11,8 @@ Note - install*](../hello_world/#running-examples). - - **If you don’t like sudo** then see [*Giving non-root - access*](../../installation/binaries/#dockergroup) -+- **If you’re using OS X or docker via TCP** then you shouldn’t use -+ sudo - - Traditionally a Docker container runs a single process when it is - launched, for example an Apache daemon or a SSH server daemon. 
Often -diff --git a/docs/sources/faq.md b/docs/sources/faq.md -index 06da238..4977f73 100644 ---- a/docs/sources/faq.md -+++ b/docs/sources/faq.md -@@ -1,122 +1,128 @@ -+page_title: FAQ -+page_description: Most frequently asked questions. -+page_keywords: faq, questions, documentation, docker -+ - # FAQ - - ## Most frequently asked questions. - - ### How much does Docker cost? - --Docker is 100% free, it is open source, so you can use it without --paying. -+> Docker is 100% free, it is open source, so you can use it without -+> paying. - - ### What open source license are you using? - --We are using the Apache License Version 2.0. --You can see it [here](https://github.com/dotcloud/docker/blob/master/LICENSE). -+> We are using the Apache License Version 2.0, see it here: -+> [https://github.com/dotcloud/docker/blob/master/LICENSE](https://github.com/dotcloud/docker/blob/master/LICENSE) - - ### Does Docker run on Mac OS X or Windows? - --Not at this time, Docker currently only runs on Linux, but you can use --VirtualBox to run Docker in a virtual machine on your box, and get the --best of both worlds. Check out the [*Mac OSX*](../installation/mac/#macosx) and --[*Windows*](../installation/windows/#windows) installation guides. The --small Linux distribution *boot2docker* can be run inside virtual --machines on these two operating systems. -+> Not at this time, Docker currently only runs on Linux, but you can use -+> VirtualBox to run Docker in a virtual machine on your box, and get the -+> best of both worlds. Check out the [*Mac OS -+> X*](../installation/mac/#macosx) and [*Microsoft -+> Windows*](../installation/windows/#windows) installation guides. The -+> small Linux distribution boot2docker can be run inside virtual -+> machines on these two operating systems. - - ### How do containers compare to virtual machines? - --They are complementary. VMs are best used to allocate chunks of --hardware resources. Containers operate at the process level, which --makes them very lightweight and perfect as a unit of software --delivery. -+> They are complementary. VMs are best used to allocate chunks of -+> hardware resources. Containers operate at the process level, which -+> makes them very lightweight and perfect as a unit of software -+> delivery. - - ### What does Docker add to just plain LXC? - --Docker is not a replacement for LXC. “LXC” refers to capabilities of --the Linux kernel (specifically namespaces and control groups) which --allow sandboxing processes from one another, and controlling their --resource allocations. On top of this low-level foundation of kernel --features, Docker offers a high-level tool with several powerful --functionalities: -- -- - **Portable deployment across machines:** -- Docker defines a format for bundling an application and all -- its dependencies into a single object which can be transferred -- to any Docker-enabled machine, and executed there with the -- guarantee that the execution environment exposed to the -- application will be the same. LXC implements process -- sandboxing, which is an important pre-requisite for portable -- deployment, but that alone is not enough for portable -- deployment. If you sent me a copy of your application -- installed in a custom LXC configuration, it would almost -- certainly not run on my machine the way it does on yours, -- because it is tied to your machine’s specific configuration: -- networking, storage, logging, distro, etc. 
Docker defines an -- abstraction for these machine-specific settings, so that the -- exact same Docker container can run - unchanged - on many -- different machines, with many different configurations. -- -- - **Application-centric:** -- Docker is optimized for the deployment of applications, as -- opposed to machines. This is reflected in its API, user -- interface, design philosophy and documentation. By contrast, -- the `lxc` helper scripts focus on -- containers as lightweight machines - basically servers that -- boot faster and need less RAM. We think there’s more to -- containers than just that. -- -- - **Automatic build:** -- Docker includes [*a tool for developers to automatically -- assemble a container from their source -- code*](../reference/builder/#dockerbuilder), with full control -- over application dependencies, build tools, packaging etc. -- They are free to use -- `make, maven, chef, puppet, salt,` Debian -- packages, RPMs, source tarballs, or any combination of the -- above, regardless of the configuration of the machines. -- -- - **Versioning:** -- Docker includes git-like capabilities for tracking successive -- versions of a container, inspecting the diff between versions, -- committing new versions, rolling back etc. The history also -- includes how a container was assembled and by whom, so you get -- full traceability from the production server all the way back -- to the upstream developer. Docker also implements incremental -- uploads and downloads, similar to `git pull`{.docutils -- .literal}, so new versions of a container can be transferred -- by only sending diffs. -- -- - **Component re-use:** -- Any container can be used as a [*“base -- image”*](../terms/image/#base-image-def) to create more -- specialized components. This can be done manually or as part -- of an automated build. For example you can prepare the ideal -- Python environment, and use it as a base for 10 different -- applications. Your ideal Postgresql setup can be re-used for -- all your future projects. And so on. -- -- - **Sharing:** -- Docker has access to a [public registry](http://index.docker.io) -- where thousands of people have uploaded useful containers: anything -- from Redis, CouchDB, Postgres to IRC bouncers to Rails app servers to -- Hadoop to base images for various Linux distros. The -- [*registry*](../reference/api/registry_index_spec/#registryindexspec) -- also includes an official “standard library” of useful -- containers maintained by the Docker team. The registry itself -- is open-source, so anyone can deploy their own registry to -- store and transfer private containers, for internal server -- deployments for example. -- -- - **Tool ecosystem:** -- Docker defines an API for automating and customizing the -- creation and deployment of containers. There are a huge number -- of tools integrating with Docker to extend its capabilities. -- PaaS-like deployment (Dokku, Deis, Flynn), multi-node -- orchestration (Maestro, Salt, Mesos, Openstack Nova), -- management dashboards (docker-ui, Openstack Horizon, -- Shipyard), configuration management (Chef, Puppet), continuous -- integration (Jenkins, Strider, Travis), etc. Docker is rapidly -- establishing itself as the standard for container-based -- tooling. -- -+> Docker is not a replacement for LXC. “LXC” refers to capabilities of -+> the Linux kernel (specifically namespaces and control groups) which -+> allow sandboxing processes from one another, and controlling their -+> resource allocations. 
On top of this low-level foundation of kernel -+> features, Docker offers a high-level tool with several powerful -+> functionalities: -+> -+> - *Portable deployment across machines.* -+> : Docker defines a format for bundling an application and all -+> its dependencies into a single object which can be transferred -+> to any Docker-enabled machine, and executed there with the -+> guarantee that the execution environment exposed to the -+> application will be the same. LXC implements process -+> sandboxing, which is an important pre-requisite for portable -+> deployment, but that alone is not enough for portable -+> deployment. If you sent me a copy of your application -+> installed in a custom LXC configuration, it would almost -+> certainly not run on my machine the way it does on yours, -+> because it is tied to your machine’s specific configuration: -+> networking, storage, logging, distro, etc. Docker defines an -+> abstraction for these machine-specific settings, so that the -+> exact same Docker container can run - unchanged - on many -+> different machines, with many different configurations. -+> -+> - *Application-centric.* -+> : Docker is optimized for the deployment of applications, as -+> opposed to machines. This is reflected in its API, user -+> interface, design philosophy and documentation. By contrast, -+> the `lxc` helper scripts focus on -+> containers as lightweight machines - basically servers that -+> boot faster and need less RAM. We think there’s more to -+> containers than just that. -+> -+> - *Automatic build.* -+> : Docker includes [*a tool for developers to automatically -+> assemble a container from their source -+> code*](../reference/builder/#dockerbuilder), with full control -+> over application dependencies, build tools, packaging etc. -+> They are free to use -+> `make, maven, chef, puppet, salt,` Debian -+> packages, RPMs, source tarballs, or any combination of the -+> above, regardless of the configuration of the machines. -+> -+> - *Versioning.* -+> : Docker includes git-like capabilities for tracking successive -+> versions of a container, inspecting the diff between versions, -+> committing new versions, rolling back etc. The history also -+> includes how a container was assembled and by whom, so you get -+> full traceability from the production server all the way back -+> to the upstream developer. Docker also implements incremental -+> uploads and downloads, similar to `git pull`{.docutils -+> .literal}, so new versions of a container can be transferred -+> by only sending diffs. -+> -+> - *Component re-use.* -+> : Any container can be used as a [*“base -+> image”*](../terms/image/#base-image-def) to create more -+> specialized components. This can be done manually or as part -+> of an automated build. For example you can prepare the ideal -+> Python environment, and use it as a base for 10 different -+> applications. Your ideal Postgresql setup can be re-used for -+> all your future projects. And so on. -+> -+> - *Sharing.* -+> : Docker has access to a [public -+> registry](http://index.docker.io) where thousands of people -+> have uploaded useful containers: anything from Redis, CouchDB, -+> Postgres to IRC bouncers to Rails app servers to Hadoop to -+> base images for various Linux distros. The -+> [*registry*](../reference/api/registry_index_spec/#registryindexspec) -+> also includes an official “standard library” of useful -+> containers maintained by the Docker team. 
The registry itself -+> is open-source, so anyone can deploy their own registry to -+> store and transfer private containers, for internal server -+> deployments for example. -+> -+> - *Tool ecosystem.* -+> : Docker defines an API for automating and customizing the -+> creation and deployment of containers. There are a huge number -+> of tools integrating with Docker to extend its capabilities. -+> PaaS-like deployment (Dokku, Deis, Flynn), multi-node -+> orchestration (Maestro, Salt, Mesos, Openstack Nova), -+> management dashboards (docker-ui, Openstack Horizon, -+> Shipyard), configuration management (Chef, Puppet), continuous -+> integration (Jenkins, Strider, Travis), etc. Docker is rapidly -+> establishing itself as the standard for container-based -+> tooling. -+> - ### What is different between a Docker container and a VM? - - There’s a great StackOverflow answer [showing the -@@ -159,22 +165,22 @@ here](http://docs.docker.io/en/latest/examples/using_supervisord/). - - ### What platforms does Docker run on? - --**Linux:** -+Linux: - --- Ubuntu 12.04, 13.04 et al --- Fedora 19/20+ --- RHEL 6.5+ --- Centos 6+ --- Gentoo --- ArchLinux --- openSUSE 12.3+ --- CRUX 3.0+ -+- Ubuntu 12.04, 13.04 et al -+- Fedora 19/20+ -+- RHEL 6.5+ -+- Centos 6+ -+- Gentoo -+- ArchLinux -+- openSUSE 12.3+ -+- CRUX 3.0+ - --**Cloud:** -+Cloud: - --- Amazon EC2 --- Google Compute Engine --- Rackspace -+- Amazon EC2 -+- Google Compute Engine -+- Rackspace - - ### How do I report a security issue with Docker? - -@@ -196,14 +202,17 @@ sources. - - ### Where can I find more answers? - --You can find more answers on: -- --- [Docker user mailinglist](https://groups.google.com/d/forum/docker-user) --- [Docker developer mailinglist](https://groups.google.com/d/forum/docker-dev) --- [IRC, docker on freenode](irc://chat.freenode.net#docker) --- [GitHub](http://www.github.com/dotcloud/docker) --- [Ask questions on Stackoverflow](http://stackoverflow.com/search?q=docker) --- [Join the conversation on Twitter](http://twitter.com/docker) -+> You can find more answers on: -+> -+> - [Docker user -+> mailinglist](https://groups.google.com/d/forum/docker-user) -+> - [Docker developer -+> mailinglist](https://groups.google.com/d/forum/docker-dev) -+> - [IRC, docker on freenode](irc://chat.freenode.net#docker) -+> - [GitHub](http://www.github.com/dotcloud/docker) -+> - [Ask questions on -+> Stackoverflow](http://stackoverflow.com/search?q=docker) -+> - [Join the conversation on Twitter](http://twitter.com/docker) - - Looking for something else to read? Checkout the [*Hello - World*](../examples/hello_world/#hello-world) example. 
-diff --git a/docs/sources/genindex.md b/docs/sources/genindex.md -index 8b013d6..e9bcd34 100644 ---- a/docs/sources/genindex.md -+++ b/docs/sources/genindex.md -@@ -1 +1,2 @@ -+ - # Index -diff --git a/docs/sources/http-routingtable.md b/docs/sources/http-routingtable.md -index 2a06fdb..4ca4116 100644 ---- a/docs/sources/http-routingtable.md -+++ b/docs/sources/http-routingtable.md -@@ -1,3 +1,4 @@ -+ - # HTTP Routing Table - - [**/api**](#cap-/api) | [**/auth**](#cap-/auth) | -diff --git a/docs/sources/index.md b/docs/sources/index.md -index c5a5b6f..dd9e272 100644 ---- a/docs/sources/index.md -+++ b/docs/sources/index.md -@@ -1,3 +1 @@ --# Docker Documentation -- --## Introduction -\ No newline at end of file -+# Docker documentation -diff --git a/docs/sources/installation.md b/docs/sources/installation.md -index 0ee7b2f..4fdd102 100644 ---- a/docs/sources/installation.md -+++ b/docs/sources/installation.md -@@ -1,25 +1,26 @@ --# Installation - --## Introduction -+# Installation - - There are a number of ways to install Docker, depending on where you - want to run the daemon. The [*Ubuntu*](ubuntulinux/#ubuntu-linux) - installation is the officially-tested version. The community adds more - techniques for installing Docker all the time. - --## Contents: -+Contents: -+ -+- [Ubuntu](ubuntulinux/) -+- [Red Hat Enterprise Linux](rhel/) -+- [Fedora](fedora/) -+- [Arch Linux](archlinux/) -+- [CRUX Linux](cruxlinux/) -+- [Gentoo](gentoolinux/) -+- [openSUSE](openSUSE/) -+- [FrugalWare](frugalware/) -+- [Mac OS X](mac/) -+- [Microsoft Windows](windows/) -+- [Amazon EC2](amazon/) -+- [Rackspace Cloud](rackspace/) -+- [Google Cloud Platform](google/) -+- [IBM SoftLayer](softlayer/) -+- [Binaries](binaries/) - --- [Ubuntu](ubuntulinux/) --- [Red Hat Enterprise Linux](rhel/) --- [Fedora](fedora/) --- [Arch Linux](archlinux/) --- [CRUX Linux](cruxlinux/) --- [Gentoo](gentoolinux/) --- [openSUSE](openSUSE/) --- [FrugalWare](frugalware/) --- [Mac OS X](mac/) --- [Windows](windows/) --- [Amazon EC2](amazon/) --- [Rackspace Cloud](rackspace/) --- [Google Cloud Platform](google/) --- [Binaries](binaries/) -\ No newline at end of file -diff --git a/docs/sources/installation/binaries.md b/docs/sources/installation/binaries.md -index 5d761de..0aa22ca 100644 ---- a/docs/sources/installation/binaries.md -+++ b/docs/sources/installation/binaries.md -@@ -23,14 +23,15 @@ packages for many distributions, and more keep showing up all the time! - To run properly, docker needs the following software to be installed at - runtime: - --- iproute2 version 3.5 or later (build after 2012-05-21), and -- specifically the “ip” utility - - iptables version 1.4 or later --- The LXC utility scripts -- ([http://lxc.sourceforge.net](http://lxc.sourceforge.net)) version -- 0.8 or later - - Git version 1.7 or later - - XZ Utils 4.9 or later -+- a [properly -+ mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) -+ cgroupfs hierarchy (having a single, all-encompassing “cgroup” mount -+ point [is](https://github.com/dotcloud/docker/issues/2683) -+ [not](https://github.com/dotcloud/docker/issues/3485) -+ [sufficient](https://github.com/dotcloud/docker/issues/4568)) - - ## Check kernel dependencies - -@@ -38,7 +39,7 @@ Docker in daemon mode has specific kernel requirements. For details, - check your distribution in [*Installation*](../#installation-list). - - Note that Docker also has a client mode, which can run on virtually any --linux kernel (it even builds on OSX!). -+Linux kernel (it even builds on OSX!). 
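
The new runtime requirement above, a properly mounted cgroupfs hierarchy, means each cgroup subsystem should appear as its own mount (typically under `/sys/fs/cgroup/<subsystem>`) rather than one all-encompassing `cgroup` mount point. As a rough illustration only (not part of this patch, and not the check Docker itself performs), a small Go sketch that lists the cgroup mounts from `/proc/mounts` could look like:

    package main

    import (
        "bufio"
        "fmt"
        "log"
        "os"
        "strings"
    )

    func main() {
        f, err := os.Open("/proc/mounts")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        found := 0
        scanner := bufio.NewScanner(f)
        for scanner.Scan() {
            // /proc/mounts fields: device, mount point, fstype, options, dump, pass
            fields := strings.Fields(scanner.Text())
            if len(fields) >= 4 && fields[2] == "cgroup" {
                fmt.Printf("cgroup hierarchy mounted at %s (%s)\n", fields[1], fields[3])
                found++
            }
        }
        if err := scanner.Err(); err != nil {
            log.Fatal(err)
        }
        if found <= 1 {
            fmt.Println("warning: at most one cgroup mount found; a single all-encompassing mount is not sufficient")
        }
    }
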
- - ## Get the docker binary: - -@@ -69,7 +70,9 @@ all the client commands. - - Warning - --The *docker* group is root-equivalent. -+The *docker* group (or the group specified with `-G`{.docutils -+.literal}) is root-equivalent; see [*Docker Daemon Attack -+Surface*](../../articles/security/#dockersecurity-daemon) details. - - ## Upgrades - -diff --git a/docs/sources/installation/fedora.md b/docs/sources/installation/fedora.md -index 545e523..32f4fd2 100644 ---- a/docs/sources/installation/fedora.md -+++ b/docs/sources/installation/fedora.md -@@ -31,13 +31,14 @@ installed already, it will conflict with `docker-io`{.docutils - .literal}. There’s a [bug - report](https://bugzilla.redhat.com/show_bug.cgi?id=1043676) filed for - it. To proceed with `docker-io` installation on --Fedora 19, please remove `docker` first. -+Fedora 19 or Fedora 20, please remove `docker` -+first. - - sudo yum -y remove docker - --For Fedora 20 and later, the `wmdocker` package will --provide the same functionality as `docker` and will --also not conflict with `docker-io`. -+For Fedora 21 and later, the `wmdocker` package will -+provide the same functionality as the old `docker` -+and will also not conflict with `docker-io`. - - sudo yum -y install wmdocker - sudo yum -y remove docker -diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md -index 8c83e87..b6e9889 100644 ---- a/docs/sources/installation/ubuntulinux.md -+++ b/docs/sources/installation/ubuntulinux.md -@@ -56,13 +56,13 @@ These instructions have changed for 0.6. If you are upgrading from an - earlier version, you will need to follow them again. - - Docker is available as a Debian package, which makes installation easy. --**See the :ref:\`installmirrors\` section below if you are not in the --United States.** Other sources of the Debian packages may be faster for --you to install. -+**See the** [*Docker and local DNS server warnings*](#installmirrors) -+**section below if you are not in the United States.** Other sources of -+the Debian packages may be faster for you to install. - - First add the Docker repository key to your local keychain. - -- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 -+ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 - - Add the Docker repository to your apt sources list, update and install - the `lxc-docker` package. -@@ -121,7 +121,7 @@ upgrading from an earlier version, you will need to follow them again. - - First add the Docker repository key to your local keychain. - -- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 -+ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 - - Add the Docker repository to your apt sources list, update and install - the `lxc-docker` package. -@@ -156,11 +156,15 @@ socket read/writable by the *docker* group when the daemon starts. The - `docker` daemon must always run as the root user, - but if you run the `docker` client as a user in the - *docker* group then you don’t need to add `sudo` to --all the client commands. -+all the client commands. As of 0.9.0, you can specify that a group other -+than `docker` should own the Unix socket with the -+`-G` option. - - Warning - --The *docker* group is root-equivalent. 
-+The *docker* group (or the group specified with `-G`{.docutils -+.literal}) is root-equivalent; see [*Docker Daemon Attack -+Surface*](../../articles/security/#dockersecurity-daemon) details. - - **Example:** - -@@ -259,9 +263,9 @@ Docker daemon for the containers: - sudo nano /etc/default/docker - --- - # Add: -- DOCKER_OPTS="-dns 8.8.8.8" -+ DOCKER_OPTS="--dns 8.8.8.8" - # 8.8.8.8 could be replaced with a local DNS server, such as 192.168.1.1 -- # multiple DNS servers can be specified: -dns 8.8.8.8 -dns 192.168.1.1 -+ # multiple DNS servers can be specified: --dns 8.8.8.8 --dns 192.168.1.1 - - The Docker daemon has to be restarted: - -diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md -index ec3e706..ad367d9 100644 ---- a/docs/sources/installation/windows.md -+++ b/docs/sources/installation/windows.md -@@ -2,7 +2,7 @@ page_title: Installation on Windows - page_description: Please note this project is currently under heavy development. It should not be used in production. - page_keywords: Docker, Docker documentation, Windows, requirements, virtualbox, boot2docker - --# Windows -+# Microsoft Windows - - Docker can run on Windows using a virtualization platform like - VirtualBox. A Linux distribution is run inside a virtual machine and -@@ -17,7 +17,7 @@ production yet, but we’re getting closer with each release. Please see - our blog post, [“Getting to Docker - 1.0”](http://blog.docker.io/2013/08/getting-to-docker-1-0/) - --1. Install virtualbox from -+1. Install VirtualBox from - [https://www.virtualbox.org](https://www.virtualbox.org) - or follow - this - [tutorial](http://www.slideshare.net/julienbarbier42/install-virtualbox-on-windows-7). -diff --git a/docs/sources/reference.md b/docs/sources/reference.md -index 3cd720c..1c4022e 100644 ---- a/docs/sources/reference.md -+++ b/docs/sources/reference.md -@@ -1,9 +1,10 @@ -+ - # Reference Manual - --## Contents: -+Contents: - --- [Commands](commandline/) --- [Dockerfile Reference](builder/) --- [Docker Run Reference](run/) --- [APIs](api/) -+- [Commands](commandline/) -+- [Dockerfile Reference](builder/) -+- [Docker Run Reference](run/) -+- [APIs](api/) - -diff --git a/docs/sources/reference/api.md b/docs/sources/reference/api.md -index ae55e6a..ce571bc 100644 ---- a/docs/sources/reference/api.md -+++ b/docs/sources/reference/api.md -@@ -1,3 +1,4 @@ -+ - # APIs - - Your programs and scripts can access Docker’s functionality via these -@@ -8,34 +9,28 @@ interfaces: - - [1.1 Index](registry_index_spec/#index) - - [1.2 Registry](registry_index_spec/#registry) - - [1.3 Docker](registry_index_spec/#docker) -- - - [2. Workflow](registry_index_spec/#workflow) - - [2.1 Pull](registry_index_spec/#pull) - - [2.2 Push](registry_index_spec/#push) - - [2.3 Delete](registry_index_spec/#delete) -- - - [3. How to use the Registry in standalone - mode](registry_index_spec/#how-to-use-the-registry-in-standalone-mode) - - [3.1 Without an - Index](registry_index_spec/#without-an-index) - - [3.2 With an Index](registry_index_spec/#with-an-index) -- - - [4. The API](registry_index_spec/#the-api) - - [4.1 Images](registry_index_spec/#images) - - [4.2 Users](registry_index_spec/#users) - - [4.3 Tags (Registry)](registry_index_spec/#tags-registry) - - [4.4 Images (Index)](registry_index_spec/#images-index) - - [4.5 Repositories](registry_index_spec/#repositories) -- - - [5. Chaining - Registries](registry_index_spec/#chaining-registries) - - [6. 
Authentication & - Authorization](registry_index_spec/#authentication-authorization) - - [6.1 On the Index](registry_index_spec/#on-the-index) - - [6.2 On the Registry](registry_index_spec/#on-the-registry) -- - - [7 Document Version](registry_index_spec/#document-version) -- - - [Docker Registry API](registry_api/) - - [1. Brief introduction](registry_api/#brief-introduction) - - [2. Endpoints](registry_api/#endpoints) -@@ -43,16 +38,13 @@ interfaces: - - [2.2 Tags](registry_api/#tags) - - [2.3 Repositories](registry_api/#repositories) - - [2.4 Status](registry_api/#status) -- - - [3 Authorization](registry_api/#authorization) -- - - [Docker Index API](index_api/) - - [1. Brief introduction](index_api/#brief-introduction) - - [2. Endpoints](index_api/#endpoints) - - [2.1 Repository](index_api/#repository) - - [2.2 Users](index_api/#users) - - [2.3 Search](index_api/#search) -- - - [Docker Remote API](docker_remote_api/) - - [1. Brief introduction](docker_remote_api/#brief-introduction) - - [2. Versions](docker_remote_api/#versions) -@@ -67,7 +59,6 @@ interfaces: - - [v1.2](docker_remote_api/#v1-2) - - [v1.1](docker_remote_api/#v1-1) - - [v1.0](docker_remote_api/#v1-0) -- - - [Docker Remote API Client Libraries](remote_api_client_libraries/) - - [docker.io OAuth API](docker_io_oauth_api/) - - [1. Brief introduction](docker_io_oauth_api/#brief-introduction) -@@ -79,10 +70,8 @@ interfaces: - - [3.2 Get an Access - Token](docker_io_oauth_api/#get-an-access-token) - - [3.3 Refresh a Token](docker_io_oauth_api/#refresh-a-token) -- - - [4. Use an Access Token with the - API](docker_io_oauth_api/#use-an-access-token-with-the-api) -- - - [docker.io Accounts API](docker_io_accounts_api/) - - [1. Endpoints](docker_io_accounts_api/#endpoints) - - [1.1 Get a single -@@ -96,4 +85,5 @@ interfaces: - - [1.5 Update an email address for a - user](docker_io_accounts_api/#update-an-email-address-for-a-user) - - [1.6 Delete email address for a -- user](docker_io_accounts_api/#delete-email-address-for-a-user) -\ No newline at end of file -+ user](docker_io_accounts_api/#delete-email-address-for-a-user) -+ -diff --git a/docs/sources/reference/api/docker_io_accounts_api.md b/docs/sources/reference/api/docker_io_accounts_api.md -index 6ad5361..dc78076 100644 ---- a/docs/sources/reference/api/docker_io_accounts_api.md -+++ b/docs/sources/reference/api/docker_io_accounts_api.md -@@ -2,35 +2,50 @@ page_title: docker.io Accounts API - page_description: API Documentation for docker.io accounts. - page_keywords: API, Docker, accounts, REST, documentation - --# Docker IO Accounts API -+# [docker.io Accounts API](#id1) - --## Endpoints -+Table of Contents - --### Get A Single User -+- [docker.io Accounts API](#docker-io-accounts-api) -+ - [1. Endpoints](#endpoints) -+ - [1.1 Get a single user](#get-a-single-user) -+ - [1.2 Update a single user](#update-a-single-user) -+ - [1.3 List email addresses for a -+ user](#list-email-addresses-for-a-user) -+ - [1.4 Add email address for a -+ user](#add-email-address-for-a-user) -+ - [1.5 Update an email address for a -+ user](#update-an-email-address-for-a-user) -+ - [1.6 Delete email address for a -+ user](#delete-email-address-for-a-user) -+ -+## [1. Endpoints](#id2) -+ -+### [1.1 Get a single user](#id3) - - `GET `{.descname}`/api/v1.1/users/:username/`{.descname} - : Get profile info for the specified user. - - Parameters: - -- - **username** – username of the user whose profile info is being -+ - **username** – username of the user whose profile info is being - requested. 
- - Request Headers: - -   - -- - **Authorization** – required authentication credentials of -+ - **Authorization** – required authentication credentials of - either type HTTP Basic or OAuth Bearer Token. - - Status Codes: - -- - **200** – success, user data returned. -- - **401** – authentication error. -- - **403** – permission error, authenticated user must be the user -+ - **200** – success, user data returned. -+ - **401** – authentication error. -+ - **403** – permission error, authenticated user must be the user - whose data is being requested, OAuth access tokens must have - `profile_read` scope. -- - **404** – the specified username does not exist. -+ - **404** – the specified username does not exist. - - **Example request**: - -@@ -59,45 +74,45 @@ page_keywords: API, Docker, accounts, REST, documentation - "is_active": true - } - --### Update A Single User -+### [1.2 Update a single user](#id4) - - `PATCH `{.descname}`/api/v1.1/users/:username/`{.descname} - : Update profile info for the specified user. - - Parameters: - -- - **username** – username of the user whose profile info is being -+ - **username** – username of the user whose profile info is being - updated. - - Json Parameters: - -   - -- - **full\_name** (*string*) – (optional) the new name of the user. -- - **location** (*string*) – (optional) the new location. -- - **company** (*string*) – (optional) the new company of the user. -- - **profile\_url** (*string*) – (optional) the new profile url. -- - **gravatar\_email** (*string*) – (optional) the new Gravatar -+ - **full\_name** (*string*) – (optional) the new name of the user. -+ - **location** (*string*) – (optional) the new location. -+ - **company** (*string*) – (optional) the new company of the user. -+ - **profile\_url** (*string*) – (optional) the new profile url. -+ - **gravatar\_email** (*string*) – (optional) the new Gravatar - email address. - - Request Headers: - -   - -- - **Authorization** – required authentication credentials of -+ - **Authorization** – required authentication credentials of - either type HTTP Basic or OAuth Bearer Token. -- - **Content-Type** – MIME Type of post data. JSON, url-encoded -+ - **Content-Type** – MIME Type of post data. JSON, url-encoded - form data, etc. - - Status Codes: - -- - **200** – success, user data updated. -- - **400** – post data validation error. -- - **401** – authentication error. -- - **403** – permission error, authenticated user must be the user -+ - **200** – success, user data updated. -+ - **400** – post data validation error. -+ - **401** – authentication error. -+ - **403** – permission error, authenticated user must be the user - whose data is being updated, OAuth access tokens must have - `profile_write` scope. -- - **404** – the specified username does not exist. -+ - **404** – the specified username does not exist. - - **Example request**: - -@@ -132,31 +147,31 @@ page_keywords: API, Docker, accounts, REST, documentation - "is_active": true - } - --### List Email Addresses For A User -+### [1.3 List email addresses for a user](#id5) - - `GET `{.descname}`/api/v1.1/users/:username/emails/`{.descname} - : List email info for the specified user. - - Parameters: - -- - **username** – username of the user whose profile info is being -+ - **username** – username of the user whose profile info is being - updated. 
- - Request Headers: - -   - -- - **Authorization** – required authentication credentials of -+ - **Authorization** – required authentication credentials of - either type HTTP Basic or OAuth Bearer Token - - Status Codes: - -- - **200** – success, user data updated. -- - **401** – authentication error. -- - **403** – permission error, authenticated user must be the user -+ - **200** – success, user data updated. -+ - **401** – authentication error. -+ - **403** – permission error, authenticated user must be the user - whose data is being requested, OAuth access tokens must have - `email_read` scope. -- - **404** – the specified username does not exist. -+ - **404** – the specified username does not exist. - - **Example request**: - -@@ -170,7 +185,7 @@ page_keywords: API, Docker, accounts, REST, documentation - HTTP/1.1 200 OK - Content-Type: application/json - -- -+ [ - { - "email": "jane.doe@example.com", - "verified": true, -@@ -178,7 +193,7 @@ page_keywords: API, Docker, accounts, REST, documentation - } - ] - --### Add Email Address For A User -+### [1.4 Add email address for a user](#id6) - - `POST `{.descname}`/api/v1.1/users/:username/emails/`{.descname} - : Add a new email address to the specified user’s account. The email -@@ -189,26 +204,26 @@ page_keywords: API, Docker, accounts, REST, documentation - -   - -- - **email** (*string*) – email address to be added. -+ - **email** (*string*) – email address to be added. - - Request Headers: - -   - -- - **Authorization** – required authentication credentials of -+ - **Authorization** – required authentication credentials of - either type HTTP Basic or OAuth Bearer Token. -- - **Content-Type** – MIME Type of post data. JSON, url-encoded -+ - **Content-Type** – MIME Type of post data. JSON, url-encoded - form data, etc. - - Status Codes: - -- - **201** – success, new email added. -- - **400** – data validation error. -- - **401** – authentication error. -- - **403** – permission error, authenticated user must be the user -+ - **201** – success, new email added. -+ - **400** – data validation error. -+ - **401** – authentication error. -+ - **403** – permission error, authenticated user must be the user - whose data is being requested, OAuth access tokens must have - `email_write` scope. -- - **404** – the specified username does not exist. -+ - **404** – the specified username does not exist. - - **Example request**: - -@@ -233,7 +248,7 @@ page_keywords: API, Docker, accounts, REST, documentation - "primary": false - } - --### Update An Email Address For A User -+### [1.5 Update an email address for a user](#id7) - - `PATCH `{.descname}`/api/v1.1/users/:username/emails/`{.descname} - : Update an email address for the specified user to either verify an -@@ -244,17 +259,17 @@ page_keywords: API, Docker, accounts, REST, documentation - - Parameters: - -- - **username** – username of the user whose email info is being -+ - **username** – username of the user whose email info is being - updated. - - Json Parameters: - -   - -- - **email** (*string*) – the email address to be updated. -- - **verified** (*boolean*) – (optional) whether the email address -+ - **email** (*string*) – the email address to be updated. -+ - **verified** (*boolean*) – (optional) whether the email address - is verified, must be `true` or absent. -- - **primary** (*boolean*) – (optional) whether to set the email -+ - **primary** (*boolean*) – (optional) whether to set the email - address as the primary email, must be `true` - or absent. 
- -@@ -262,20 +277,20 @@ page_keywords: API, Docker, accounts, REST, documentation - -   - -- - **Authorization** – required authentication credentials of -+ - **Authorization** – required authentication credentials of - either type HTTP Basic or OAuth Bearer Token. -- - **Content-Type** – MIME Type of post data. JSON, url-encoded -+ - **Content-Type** – MIME Type of post data. JSON, url-encoded - form data, etc. - - Status Codes: - -- - **200** – success, user’s email updated. -- - **400** – data validation error. -- - **401** – authentication error. -- - **403** – permission error, authenticated user must be the user -+ - **200** – success, user’s email updated. -+ - **400** – data validation error. -+ - **401** – authentication error. -+ - **403** – permission error, authenticated user must be the user - whose data is being updated, OAuth access tokens must have - `email_write` scope. -- - **404** – the specified username or email address does not -+ - **404** – the specified username or email address does not - exist. - - **Example request**: -@@ -303,7 +318,7 @@ page_keywords: API, Docker, accounts, REST, documentation - "primary": false - } - --### Delete Email Address For A User -+### [1.6 Delete email address for a user](#id8) - - `DELETE `{.descname}`/api/v1.1/users/:username/emails/`{.descname} - : Delete an email address from the specified user’s account. You -@@ -313,26 +328,26 @@ page_keywords: API, Docker, accounts, REST, documentation - -   - -- - **email** (*string*) – email address to be deleted. -+ - **email** (*string*) – email address to be deleted. - - Request Headers: - -   - -- - **Authorization** – required authentication credentials of -+ - **Authorization** – required authentication credentials of - either type HTTP Basic or OAuth Bearer Token. -- - **Content-Type** – MIME Type of post data. JSON, url-encoded -+ - **Content-Type** – MIME Type of post data. JSON, url-encoded - form data, etc. - - Status Codes: - -- - **204** – success, email address removed. -- - **400** – validation error. -- - **401** – authentication error. -- - **403** – permission error, authenticated user must be the user -+ - **204** – success, email address removed. -+ - **400** – validation error. -+ - **401** – authentication error. -+ - **403** – permission error, authenticated user must be the user - whose data is being requested, OAuth access tokens must have - `email_write` scope. -- - **404** – the specified username or email address does not -+ - **404** – the specified username or email address does not - exist. - - **Example request**: -@@ -350,4 +365,6 @@ page_keywords: API, Docker, accounts, REST, documentation - **Example response**: - - HTTP/1.1 204 NO CONTENT -- Content-Length: 0 -\ No newline at end of file -+ Content-Length: 0 -+ -+ -diff --git a/docs/sources/reference/api/docker_io_oauth_api.md b/docs/sources/reference/api/docker_io_oauth_api.md -index 85f3a22..c39ab56 100644 ---- a/docs/sources/reference/api/docker_io_oauth_api.md -+++ b/docs/sources/reference/api/docker_io_oauth_api.md -@@ -2,9 +2,21 @@ page_title: docker.io OAuth API - page_description: API Documentation for docker.io's OAuth flow. - page_keywords: API, Docker, oauth, REST, documentation - --# Docker IO OAuth API -+# [docker.io OAuth API](#id1) - --## Introduction -+Table of Contents -+ -+- [docker.io OAuth API](#docker-io-oauth-api) -+ - [1. Brief introduction](#brief-introduction) -+ - [2. Register Your Application](#register-your-application) -+ - [3. 
Endpoints](#endpoints) -+ - [3.1 Get an Authorization Code](#get-an-authorization-code) -+ - [3.2 Get an Access Token](#get-an-access-token) -+ - [3.3 Refresh a Token](#refresh-a-token) -+ - [4. Use an Access Token with the -+ API](#use-an-access-token-with-the-api) -+ -+## [1. Brief introduction](#id2) - - Some docker.io API requests will require an access token to - authenticate. To get an access token for a user, that user must first -@@ -12,13 +24,13 @@ grant your application access to their docker.io account. In order for - them to grant your application access you must first register your - application. - --Before continuing, we encourage you to familiarize yourself with The --OAuth 2.0 Authorization Framework](http://tools.ietf.org/c6749). -+Before continuing, we encourage you to familiarize yourself with [The -+OAuth 2.0 Authorization Framework](http://tools.ietf.org/html/rfc6749). - - *Also note that all OAuth interactions must take place over https - connections* - --## Registering Your Application -+## [2. Register Your Application](#id3) - - You will need to register your application with docker.io before users - will be able to grant your application access to their account -@@ -27,10 +39,10 @@ request registration of your application send an email to - [support-accounts@docker.com](mailto:support-accounts%40docker.com) with - the following information: - --- The name of your application --- A description of your application and the service it will provide to -+- The name of your application -+- A description of your application and the service it will provide to - docker.io users. --- A callback URI that we will use for redirecting authorization -+- A callback URI that we will use for redirecting authorization - requests to your application. These are used in the step of getting - an Authorization Code. The domain name of the callback URI will be - visible to the user when they are requested to authorize your -@@ -41,9 +53,9 @@ docker.io team with your `client_id` and - `client_secret` which your application will use in - the steps of getting an Authorization Code and getting an Access Token. - --## Endpoints -+## [3. Endpoints](#id4) - --### Get an Authorization Code -+### [3.1 Get an Authorization Code](#id5) - - Once You have registered you are ready to start integrating docker.io - accounts into your application! The process is usually started by a user -@@ -61,24 +73,24 @@ following a link in your application to an OAuth Authorization endpoint. - -   - -- - **client\_id** – The `client_id` given to -+ - **client\_id** – The `client_id` given to - your application at registration. -- - **response\_type** – MUST be set to `code`. -+ - **response\_type** – MUST be set to `code`. - This specifies that you would like an Authorization Code - returned. -- - **redirect\_uri** – The URI to redirect back to after the user -+ - **redirect\_uri** – The URI to redirect back to after the user - has authorized your application. If omitted, the first of your - registered `response_uris` is used. If - included, it must be one of the URIs which were submitted when - registering your application. -- - **scope** – The extent of access permissions you are requesting. -+ - **scope** – The extent of access permissions you are requesting. - Currently, the scope options are `profile_read`{.docutils - .literal}, `profile_write`, - `email_read`, and `email_write`{.docutils - .literal}. Scopes must be separated by a space. If omitted, the - default scopes `profile_read email_read` are - used. 
-- - **state** – (Recommended) Used by your application to maintain -+ - **state** – (Recommended) Used by your application to maintain - state between the authorization request and callback to protect - against CSRF attacks. - -@@ -115,7 +127,7 @@ following a link in your application to an OAuth Authorization endpoint. - : An error message in the event of the user denying the - authorization or some other kind of error with the request. - --### Get an Access Token -+### [3.2 Get an Access Token](#id6) - - Once the user has authorized your application, a request will be made to - your application’s specified `redirect_uri` which -@@ -131,7 +143,7 @@ to get an Access Token. - -   - -- - **Authorization** – HTTP basic authentication using your -+ - **Authorization** – HTTP basic authentication using your - application’s `client_id` and - `client_secret` - -@@ -139,11 +151,11 @@ to get an Access Token. - -   - -- - **grant\_type** – MUST be set to `authorization_code`{.docutils -+ - **grant\_type** – MUST be set to `authorization_code`{.docutils - .literal} -- - **code** – The authorization code received from the user’s -+ - **code** – The authorization code received from the user’s - redirect request. -- - **redirect\_uri** – The same `redirect_uri` -+ - **redirect\_uri** – The same `redirect_uri` - used in the authentication request. - - **Example Request** -@@ -180,7 +192,7 @@ to get an Access Token. - In the case of an error, there will be a non-200 HTTP Status and and - data detailing the error. - --### Refresh a Token -+### [3.3 Refresh a Token](#id7) - - Once the Access Token expires you can use your `refresh_token`{.docutils - .literal} to have docker.io issue your application a new Access Token, -@@ -195,7 +207,7 @@ if the user has not revoked access from your application. - -   - -- - **Authorization** – HTTP basic authentication using your -+ - **Authorization** – HTTP basic authentication using your - application’s `client_id` and - `client_secret` - -@@ -203,11 +215,11 @@ if the user has not revoked access from your application. - -   - -- - **grant\_type** – MUST be set to `refresh_token`{.docutils -+ - **grant\_type** – MUST be set to `refresh_token`{.docutils - .literal} -- - **refresh\_token** – The `refresh_token` -+ - **refresh\_token** – The `refresh_token` - which was issued to your application. -- - **scope** – (optional) The scope of the access token to be -+ - **scope** – (optional) The scope of the access token to be - returned. Must not include any scope not originally granted by - the user and if omitted is treated as equal to the scope - originally granted. -@@ -245,7 +257,7 @@ if the user has not revoked access from your application. - In the case of an error, there will be a non-200 HTTP Status and and - data detailing the error. - --## Use an Access Token with the API -+## [4. Use an Access Token with the API](#id8) - - Many of the docker.io API requests will require a Authorization request - header field. 
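
Putting the flow from sections 3.2 and 4 together: the authorization code is exchanged for an access token by POSTing `grant_type=authorization_code`, authenticated with HTTP basic auth using the application's `client_id` and `client_secret`, and the resulting token is then sent on API requests in the Authorization header. The following Go sketch is illustrative only and not part of this patch; the token and API URLs are placeholders to be replaced with the real endpoints from the examples above, and the `access_token` field name in the response is an assumption here:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
        "net/http"
        "net/url"
        "strings"
    )

    func main() {
        // Placeholder URLs: substitute the token endpoint and API endpoint
        // shown in the documentation examples above.
        tokenURL := "https://www.docker.io/api/v1.1/o/token/"
        apiURL := "https://www.docker.io/api/v1.1/users/janedoe/"

        // 3.2: exchange the authorization code for an access token, using
        // HTTP basic auth with the application's client_id and client_secret.
        form := url.Values{}
        form.Set("grant_type", "authorization_code")
        form.Set("code", "CODE_FROM_REDIRECT")
        form.Set("redirect_uri", "https://your.app/oauth/callback")

        req, err := http.NewRequest("POST", tokenURL, strings.NewReader(form.Encode()))
        if err != nil {
            log.Fatal(err)
        }
        req.SetBasicAuth("YOUR_CLIENT_ID", "YOUR_CLIENT_SECRET")
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        // Field name is an assumption; check the example token response.
        var token struct {
            AccessToken string `json:"access_token"`
        }
        if err := json.NewDecoder(resp.Body).Decode(&token); err != nil {
            log.Fatal(err)
        }

        // 4: send the access token as a Bearer Authorization header.
        apiReq, err := http.NewRequest("GET", apiURL, nil)
        if err != nil {
            log.Fatal(err)
        }
        apiReq.Header.Set("Authorization", "Bearer "+token.AccessToken)

        apiResp, err := http.DefaultClient.Do(apiReq)
        if err != nil {
            log.Fatal(err)
        }
        defer apiResp.Body.Close()
        fmt.Println(apiResp.Status)
    }
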
Simply ensure you add this header with “Bearer -diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md -index 35dd858..8a2e456 100644 ---- a/docs/sources/reference/api/docker_remote_api.md -+++ b/docs/sources/reference/api/docker_remote_api.md -@@ -4,21 +4,21 @@ page_keywords: API, Docker, rcli, REST, documentation - - # Docker Remote API - --## Introduction -- --- The Remote API is replacing rcli --- By default the Docker daemon listens on unix:///var/run/docker.sock -- and the client must have root access to interact with the daemon --- If a group named *docker* exists on your system, docker will apply -- ownership of the socket to the group --- The API tends to be REST, but for some complex commands, like attach -- or pull, the HTTP connection is hijacked to transport stdout stdin -- and stderr --- Since API version 1.2, the auth configuration is now handled client -- side, so the client has to send the authConfig as POST in -- `/images/(name)/push`. -- --## Docker Remote API Versions -+## 1. Brief introduction -+ -+- The Remote API is replacing rcli -+- By default the Docker daemon listens on unix:///var/run/docker.sock -+ and the client must have root access to interact with the daemon -+- If a group named *docker* exists on your system, docker will apply -+ ownership of the socket to the group -+- The API tends to be REST, but for some complex commands, like attach -+ or pull, the HTTP connection is hijacked to transport stdout stdin -+ and stderr -+- Since API version 1.2, the auth configuration is now handled client -+ side, so the client has to send the authConfig as POST in -+ /images/(name)/push -+ -+## 2. Versions - - The current version of the API is 1.10 - -@@ -28,25 +28,31 @@ Calling /images/\/insert is the same as calling - You can still call an old version of the api using - /v1.0/images/\/insert - --## Docker Remote API v1.10 -+### v1.10 - --### Full Documentation -+#### Full Documentation - - [*Docker Remote API v1.10*](../docker_remote_api_v1.10/) - --### What’s new -+#### What’s new - - `DELETE `{.descname}`/images/`{.descname}(*name*) - : **New!** You can now use the force parameter to force delete of an -- image, even if it’s tagged in multiple repositories. -+ image, even if it’s tagged in multiple repositories. **New!** You -+ can now use the noprune parameter to prevent the deletion of parent -+ images - --## Docker Remote API v1.9 -+ `DELETE `{.descname}`/containers/`{.descname}(*id*) -+: **New!** You can now use the force paramter to force delete a -+ container, even if it is currently running - --### Full Documentation -+### v1.9 -+ -+#### Full Documentation - - [*Docker Remote API v1.9*](../docker_remote_api_v1.9/) - --### What’s New -+#### What’s new - - `POST `{.descname}`/build`{.descname} - : **New!** This endpoint now takes a serialized ConfigFile which it -@@ -54,13 +60,13 @@ You can still call an old version of the api using - base image. Clients which previously implemented the version - accepting an AuthConfig object must be updated. - --## Docker Remote API v1.8 -+### v1.8 - --### Full Documentation -+#### Full Documentation - - [*Docker Remote API v1.8*](../docker_remote_api_v1.8/) - --### What’s New -+#### What’s new - - `POST `{.descname}`/build`{.descname} - : **New!** This endpoint now returns build status as json stream. 
In -@@ -82,13 +88,13 @@ You can still call an old version of the api using - possible to get the current value and the total of the progress - without having to parse the string. - --## Docker Remote API v1.7 -+### v1.7 - --### Full Documentation -+#### Full Documentation - - [*Docker Remote API v1.7*](../docker_remote_api_v1.7/) - --### What’s New -+#### What’s new - - `GET `{.descname}`/images/json`{.descname} - : The format of the json returned from this uri changed. Instead of an -@@ -175,17 +181,17 @@ You can still call an old version of the api using - ] - - `GET `{.descname}`/images/viz`{.descname} --: This URI no longer exists. The `images -viz` -+: This URI no longer exists. The `images --viz` - output is now generated in the client, using the - `/images/json` data. - --## Docker Remote API v1.6 -+### v1.6 - --### Full Documentation -+#### Full Documentation - - [*Docker Remote API v1.6*](../docker_remote_api_v1.6/) - --### What’s New -+#### What’s new - - `POST `{.descname}`/containers/`{.descname}(*id*)`/attach`{.descname} - : **New!** You can now split stderr from stdout. This is done by -@@ -195,13 +201,13 @@ You can still call an old version of the api using - The WebSocket attach is unchanged. Note that attach calls on the - previous API version didn’t change. Stdout and stderr are merged. - --## Docker Remote API v1.5 -+### v1.5 - --### Full Documentation -+#### Full Documentation - - [*Docker Remote API v1.5*](../docker_remote_api_v1.5/) - --### What’s New -+#### What’s new - - `POST `{.descname}`/images/create`{.descname} - : **New!** You can now pass registry credentials (via an AuthConfig -@@ -216,13 +222,13 @@ You can still call an old version of the api using - dicts each containing PublicPort, PrivatePort and Type describing a - port mapping. - --## Docker Remote API v1.4 -+### v1.4 - --### Full Documentation -+#### Full Documentation - - [*Docker Remote API v1.4*](../docker_remote_api_v1.4/) - --### What’s New -+#### What’s new - - `POST `{.descname}`/images/create`{.descname} - : **New!** When pulling a repo, all images are now downloaded in -@@ -235,16 +241,16 @@ You can still call an old version of the api using - `GET `{.descname}`/events:`{.descname} - : **New!** Image’s name added in the events - --## Docker Remote API v1.3 -+### v1.3 - - docker v0.5.0 - [51f6c4a](https://github.com/dotcloud/docker/commit/51f6c4a7372450d164c61e0054daf0223ddbd909) - --### Full Documentation -+#### Full Documentation - - [*Docker Remote API v1.3*](../docker_remote_api_v1.3/) - --### What’s New -+#### What’s new - - `GET `{.descname}`/containers/`{.descname}(*id*)`/top`{.descname} - : List the processes running inside a container. -@@ -254,10 +260,10 @@ docker v0.5.0 - - Builder (/build): - --- Simplify the upload of the build context --- Simply stream a tarball instead of multipart upload with 4 -- intermediary buffers --- Simpler, less memory usage, less disk usage and faster -+- Simplify the upload of the build context -+- Simply stream a tarball instead of multipart upload with 4 -+ intermediary buffers -+- Simpler, less memory usage, less disk usage and faster - - Warning - -@@ -266,23 +272,23 @@ break on /build. - - List containers (/containers/json): - --- You can use size=1 to get the size of the containers -+- You can use size=1 to get the size of the containers - - Start containers (/containers/\/start): - --- You can now pass host-specific configuration (e.g. bind mounts) in -- the POST body for start calls -+- You can now pass host-specific configuration (e.g. 
bind mounts) in -+ the POST body for start calls - --## Docker Remote API v1.2 -+### v1.2 - - docker v0.4.2 - [2e7649b](https://github.com/dotcloud/docker/commit/2e7649beda7c820793bd46766cbc2cfeace7b168) - --### Full Documentation -+#### Full Documentation - - [*Docker Remote API v1.2*](../docker_remote_api_v1.2/) - --### What’s New -+#### What’s new - - The auth configuration is now handled by the client. - -@@ -302,16 +308,16 @@ The client should send it’s authConfig as POST on each call of - : Now returns a JSON structure with the list of images - deleted/untagged. - --## Docker Remote API v1.1 -+### v1.1 - - docker v0.4.0 - [a8ae398](https://github.com/dotcloud/docker/commit/a8ae398bf52e97148ee7bd0d5868de2e15bd297f) - --### Full Documentation -+#### Full Documentation - - [*Docker Remote API v1.1*](../docker_remote_api_v1.1/) - --### What’s New -+#### What’s new - - `POST `{.descname}`/images/create`{.descname} - : -@@ -330,15 +336,15 @@ docker v0.4.0 - > {"error":"Invalid..."} - > ... - --## Docker Remote API v1.0 -+### v1.0 - - docker v0.3.4 - [8d73740](https://github.com/dotcloud/docker/commit/8d73740343778651c09160cde9661f5f387b36f4) - --### Full Documentation -+#### Full Documentation - - [*Docker Remote API v1.0*](../docker_remote_api_v1.0/) - --### What’s New -+#### What’s new - - Initial version -diff --git a/docs/sources/reference/api/docker_remote_api_v1.0.md b/docs/sources/reference/api/docker_remote_api_v1.0.md -index 6bb0fcb..30b1718 100644 ---- a/docs/sources/reference/api/docker_remote_api_v1.0.md -+++ b/docs/sources/reference/api/docker_remote_api_v1.0.md -@@ -2,21 +2,70 @@ page_title: Remote API v1.0 - page_description: API Documentation for Docker - page_keywords: API, Docker, rcli, REST, documentation - --# Docker Remote API v1.0 -- --## Introduction -- --- The Remote API is replacing rcli --- Default port in the docker daemon is 4243 --- The API tends to be REST, but for some complex commands, like attach -+# [Docker Remote API v1.0](#id1) -+ -+Table of Contents -+ -+- [Docker Remote API v1.0](#docker-remote-api-v1-0) -+ - [1. Brief introduction](#brief-introduction) -+ - [2. 
Endpoints](#endpoints) -+ - [2.1 Containers](#containers) -+ - [List containers](#list-containers) -+ - [Create a container](#create-a-container) -+ - [Inspect a container](#inspect-a-container) -+ - [Inspect changes on a container’s -+ filesystem](#inspect-changes-on-a-container-s-filesystem) -+ - [Export a container](#export-a-container) -+ - [Start a container](#start-a-container) -+ - [Stop a container](#stop-a-container) -+ - [Restart a container](#restart-a-container) -+ - [Kill a container](#kill-a-container) -+ - [Attach to a container](#attach-to-a-container) -+ - [Wait a container](#wait-a-container) -+ - [Remove a container](#remove-a-container) -+ - [2.2 Images](#images) -+ - [List Images](#list-images) -+ - [Create an image](#create-an-image) -+ - [Insert a file in an image](#insert-a-file-in-an-image) -+ - [Inspect an image](#inspect-an-image) -+ - [Get the history of an -+ image](#get-the-history-of-an-image) -+ - [Push an image on the -+ registry](#push-an-image-on-the-registry) -+ - [Tag an image into a -+ repository](#tag-an-image-into-a-repository) -+ - [Remove an image](#remove-an-image) -+ - [Search images](#search-images) -+ - [2.3 Misc](#misc) -+ - [Build an image from Dockerfile via -+ stdin](#build-an-image-from-dockerfile-via-stdin) -+ - [Get default username and -+ email](#get-default-username-and-email) -+ - [Check auth configuration and store -+ it](#check-auth-configuration-and-store-it) -+ - [Display system-wide -+ information](#display-system-wide-information) -+ - [Show the docker version -+ information](#show-the-docker-version-information) -+ - [Create a new image from a container’s -+ changes](#create-a-new-image-from-a-container-s-changes) -+ - [3. Going further](#going-further) -+ - [3.1 Inside ‘docker run’](#inside-docker-run) -+ - [3.2 Hijacking](#hijacking) -+ -+## [1. Brief introduction](#id2) -+ -+- The Remote API is replacing rcli -+- Default port in the docker daemon is 4243 -+- The API tends to be REST, but for some complex commands, like attach - or pull, the HTTP connection is hijacked to transport stdout stdin - and stderr - --## Endpoints -+## [2. Endpoints](#id3) - --### Containers -+### [2.1 Containers](#id4) - --### List containers: -+#### [List containers](#id5) - - `GET `{.descname}`/containers/json`{.descname} - : List containers -@@ -65,22 +114,22 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default -- - **limit** – Show `limit` last created -+ - **limit** – Show `limit` last created - containers, include non-running ones. -- - **since** – Show only containers created since Id, include -+ - **since** – Show only containers created since Id, include - non-running ones. -- - **before** – Show only containers created before Id, include -+ - **before** – Show only containers created before Id, include - non-running ones. 
- - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create a container: -+#### [Create a container](#id6) - - `POST `{.descname}`/containers/create`{.descname} - : Create a container -@@ -126,16 +175,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **config** – the container’s configuration -+ - **config** – the container’s configuration - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **406** – impossible to attach (container not running) -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **406** – impossible to attach (container not running) -+ - **500** – server error - --### Inspect a container: -+#### [Inspect a container](#id7) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/json`{.descname} - : Return low-level information on the container `id`{.docutils -@@ -198,11 +247,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Inspect changes on a container’s filesystem:] -+#### [Inspect changes on a container’s filesystem](#id8) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/changes`{.descname} - : Inspect changes on container `id` ‘s filesystem -@@ -233,11 +282,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Export a container: -+#### [Export a container](#id9) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/export`{.descname} - : Export the contents of container `id` -@@ -255,11 +304,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Start a container: -+#### [Start a container](#id10) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/start`{.descname} - : Start the container `id` -@@ -274,11 +323,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Stop a container: -+#### [Stop a container](#id11) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/stop`{.descname} - : Stop the container `id` -@@ -295,15 +344,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Restart a container: -+#### [Restart a container](#id12) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/restart`{.descname} - : Restart the container `id` -@@ -320,15 +369,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container 
-+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Kill a container: -+#### [Kill a container](#id13) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/kill`{.descname} - : Kill the container `id` -@@ -343,11 +392,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Attach to a container: -+#### [Attach to a container](#id14) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/attach`{.descname} - : Attach to the container `id` -@@ -367,25 +416,25 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **logs** – 1/True/true or 0/False/false, return logs. Default -+ - **logs** – 1/True/true or 0/False/false, return logs. Default - false -- - **stream** – 1/True/true or 0/False/false, return stream. -+ - **stream** – 1/True/true or 0/False/false, return stream. - Default false -- - **stdin** – 1/True/true or 0/False/false, if stream=true, attach -+ - **stdin** – 1/True/true or 0/False/false, if stream=true, attach - to stdin. Default false -- - **stdout** – 1/True/true or 0/False/false, if logs=true, return -+ - **stdout** – 1/True/true or 0/False/false, if logs=true, return - stdout log, if stream=true, attach to stdout. Default false -- - **stderr** – 1/True/true or 0/False/false, if logs=true, return -+ - **stderr** – 1/True/true or 0/False/false, if logs=true, return - stderr log, if stream=true, attach to stderr. Default false - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --### Wait a container: -+#### [Wait a container](#id15) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/wait`{.descname} - : Block until container `id` stops, then returns -@@ -404,11 +453,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Remove a container: -+#### [Remove a container](#id16) - - `DELETE `{.descname}`/containers/`{.descname}(*id*) - : Remove the container `id` from the filesystem -@@ -425,19 +474,19 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **v** – 1/True/true or 0/False/false, Remove the volumes -+ - **v** – 1/True/true or 0/False/false, Remove the volumes - associated to the container. Default false - - Status Codes: - -- - **204** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --## Images -+### [2.2 Images](#id17) - --### List images: -+#### [List Images](#id18) - - `GET `{.descname}`/images/`{.descname}(*format*) - : List images `format` could be json or viz (json -@@ -498,16 +547,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. 
-+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create an image: -+#### [Create an image](#id19) - - `POST `{.descname}`/images/create`{.descname} - : Create an image, either by pull it from the registry or by importing -@@ -528,18 +577,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **fromImage** – name of the image to pull -- - **fromSrc** – source to import, - means stdin -- - **repo** – repository -- - **tag** – tag -- - **registry** – the registry to pull from -+ - **fromImage** – name of the image to pull -+ - **fromSrc** – source to import, - means stdin -+ - **repo** – repository -+ - **tag** – tag -+ - **registry** – the registry to pull from - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Insert a file in an image: -+#### [Insert a file in an image](#id20) - - `POST `{.descname}`/images/`{.descname}(*name*)`/insert`{.descname} - : Insert a file from `url` in the image -@@ -557,10 +606,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Inspect an image: -+#### [Inspect an image](#id21) - - `GET `{.descname}`/images/`{.descname}(*name*)`/json`{.descname} - : Return low-level information on the image `name` -@@ -603,11 +652,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Get the history of an image: -+#### [Get the history of an image](#id22) - - `GET `{.descname}`/images/`{.descname}(*name*)`/history`{.descname} - : Return the history of the image `name` -@@ -636,11 +685,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Push an image on the registry: -+#### [Push an image on the registry](#id23) - - `POST `{.descname}`/images/`{.descname}(*name*)`/push`{.descname} - : Push the image `name` on the registry -@@ -660,15 +709,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **registry** – the registry you wan to push, optional -+ - **registry** – the registry you wan to push, optional - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Tag an image into a repository: -+#### [Tag an image into a repository](#id24) - - `POST `{.descname}`/images/`{.descname}(*name*)`/tag`{.descname} - : Tag the image `name` into a repository -@@ -685,17 +734,17 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **repo** – The repository to tag in -- - **force** – 1/True/true or 0/False/false, default false -+ - **repo** – The repository to tag in -+ - **force** – 1/True/true or 0/False/false, default false - - Status Codes: - -- - **201** – no error -- - **400** – bad parameter -- - **404** – no such image -- - **500** – server error -+ 
- **201** – no error -+ - **400** – bad parameter -+ - **404** – no such image -+ - **500** – server error - --### Remove an image: -+#### [Remove an image](#id25) - - `DELETE `{.descname}`/images/`{.descname}(*name*) - : Remove the image `name` from the filesystem -@@ -710,11 +759,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **204** – no error -- - **404** – no such image -- - **500** – server error -+ - **204** – no error -+ - **404** – no such image -+ - **500** – server error - --### Search images: -+#### [Search images](#id26) - - `GET `{.descname}`/images/search`{.descname} - : Search for an image in the docker index -@@ -747,9 +796,9 @@ page_keywords: API, Docker, rcli, REST, documentation - :statuscode 200: no error - :statuscode 500: server error - --## Misc -+### [2.3 Misc](#id27) - --### Build an image from Dockerfile via stdin: -+#### [Build an image from Dockerfile via stdin](#id28) - - `POST `{.descname}`/build`{.descname} - : Build an image from Dockerfile via stdin -@@ -770,15 +819,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – repository name to be applied to the resulting image in -+ - **t** – repository name to be applied to the resulting image in - case of success - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --#### [Get default username and email -+#### [Get default username and email](#id29) - - `GET `{.descname}`/auth`{.descname} - : Get the default username and email -@@ -799,10 +848,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Check auth configuration: and store it -+#### [Check auth configuration and store it](#id30) - - `POST `{.descname}`/auth`{.descname} - : Get the default username and email -@@ -824,11 +873,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **204** – no error -- - **500** – server error -+ - **200** – no error -+ - **204** – no error -+ - **500** – server error - --### Display system-wide information: -+#### [Display system-wide information](#id31) - - `GET `{.descname}`/info`{.descname} - : Display system-wide information -@@ -854,10 +903,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Show the docker version information: -+#### [Show the docker version information](#id32) - - `GET `{.descname}`/version`{.descname} - : Show the docker version information -@@ -879,10 +928,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Create a new image from a container’s changes: -+#### [Create a new image from a container’s changes](#id33) - - `POST `{.descname}`/commit`{.descname} - : Create a new image from a container’s changes -@@ -908,41 +957,41 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **container** – source container -- - **repo** – repository -- - **tag** – tag -- - **m** – commit message -- - **author** – author (eg. “John Hannibal Smith -+ - **container** – source container -+ - **repo** – repository -+ - **tag** – tag -+ - **m** – commit message -+ - **author** – author (eg. 
“John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>”) - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **500** – server error - --## Going Further -+## [3. Going further](#id34) - --### Inside ‘docker run’ -+### [3.1 Inside ‘docker run’](#id35) - - Here are the steps of ‘docker run’ : - --- Create the container -+- Create the container - --- If the status code is 404, it means the image doesn’t exists: -- : - Try to pull it -- - Then retry to create the container -+- If the status code is 404, it means the image doesn’t exists: -+ : - Try to pull it -+ - Then retry to create the container - --- Start the container -+- Start the container - --- If you are not in detached mode: -- : - Attach to the container, using logs=1 (to have stdout and -+- If you are not in detached mode: -+ : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 - --- If in detached mode or only stdin is attached: -- : - Display the container’s id -+- If in detached mode or only stdin is attached: -+ : - Display the container’s id - --### Hijacking -+### [3.2 Hijacking](#id36) - - In this first version of the API, some of the endpoints, like /attach, - /pull or /push uses hijacking to transport stdin, stdout and stderr on -diff --git a/docs/sources/reference/api/docker_remote_api_v1.1.md b/docs/sources/reference/api/docker_remote_api_v1.1.md -index 476b942..2d510f4 100644 ---- a/docs/sources/reference/api/docker_remote_api_v1.1.md -+++ b/docs/sources/reference/api/docker_remote_api_v1.1.md -@@ -2,21 +2,70 @@ page_title: Remote API v1.1 - page_description: API Documentation for Docker - page_keywords: API, Docker, rcli, REST, documentation - --# Docker Remote API v1.1 -- --## Introduction -- --- The Remote API is replacing rcli --- Default port in the docker daemon is 4243 --- The API tends to be REST, but for some complex commands, like attach -+# [Docker Remote API v1.1](#id1) -+ -+Table of Contents -+ -+- [Docker Remote API v1.1](#docker-remote-api-v1-1) -+ - [1. Brief introduction](#brief-introduction) -+ - [2. 
Endpoints](#endpoints) -+ - [2.1 Containers](#containers) -+ - [List containers](#list-containers) -+ - [Create a container](#create-a-container) -+ - [Inspect a container](#inspect-a-container) -+ - [Inspect changes on a container’s -+ filesystem](#inspect-changes-on-a-container-s-filesystem) -+ - [Export a container](#export-a-container) -+ - [Start a container](#start-a-container) -+ - [Stop a container](#stop-a-container) -+ - [Restart a container](#restart-a-container) -+ - [Kill a container](#kill-a-container) -+ - [Attach to a container](#attach-to-a-container) -+ - [Wait a container](#wait-a-container) -+ - [Remove a container](#remove-a-container) -+ - [2.2 Images](#images) -+ - [List Images](#list-images) -+ - [Create an image](#create-an-image) -+ - [Insert a file in an image](#insert-a-file-in-an-image) -+ - [Inspect an image](#inspect-an-image) -+ - [Get the history of an -+ image](#get-the-history-of-an-image) -+ - [Push an image on the -+ registry](#push-an-image-on-the-registry) -+ - [Tag an image into a -+ repository](#tag-an-image-into-a-repository) -+ - [Remove an image](#remove-an-image) -+ - [Search images](#search-images) -+ - [2.3 Misc](#misc) -+ - [Build an image from Dockerfile via -+ stdin](#build-an-image-from-dockerfile-via-stdin) -+ - [Get default username and -+ email](#get-default-username-and-email) -+ - [Check auth configuration and store -+ it](#check-auth-configuration-and-store-it) -+ - [Display system-wide -+ information](#display-system-wide-information) -+ - [Show the docker version -+ information](#show-the-docker-version-information) -+ - [Create a new image from a container’s -+ changes](#create-a-new-image-from-a-container-s-changes) -+ - [3. Going further](#going-further) -+ - [3.1 Inside ‘docker run’](#inside-docker-run) -+ - [3.2 Hijacking](#hijacking) -+ -+## [1. Brief introduction](#id2) -+ -+- The Remote API is replacing rcli -+- Default port in the docker daemon is 4243 -+- The API tends to be REST, but for some complex commands, like attach - or pull, the HTTP connection is hijacked to transport stdout stdin - and stderr - --## Endpoints -+## [2. Endpoints](#id3) - --### Containers -+### [2.1 Containers](#id4) - --### List containers: -+#### [List containers](#id5) - - `GET `{.descname}`/containers/json`{.descname} - : List containers -@@ -65,22 +114,22 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default -- - **limit** – Show `limit` last created -+ - **limit** – Show `limit` last created - containers, include non-running ones. -- - **since** – Show only containers created since Id, include -+ - **since** – Show only containers created since Id, include - non-running ones. -- - **before** – Show only containers created before Id, include -+ - **before** – Show only containers created before Id, include - non-running ones. 
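The `since` and `before` filters documented above take a container Id. As a hedged sketch (the Id below is a placeholder, not a value taken from this document), the query string can be built with `net/url` so the parameters are encoded safely:

    // Minimal sketch: list only containers created since a given Id.
    // The Id and the daemon address are placeholders for illustration.
    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
        "net/url"
    )

    func main() {
        params := url.Values{}
        params.Set("since", "4fa6e0f0c678") // hypothetical container Id
        params.Set("all", "1")              // include non-running containers

        resp, err := http.Get("http://127.0.0.1:4243/containers/json?" + params.Encode())
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            panic(err)
        }
        fmt.Printf("HTTP %d\n%s\n", resp.StatusCode, body)
    }
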
- - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create a container: -+#### [Create a container](#id6) - - `POST `{.descname}`/containers/create`{.descname} - : Create a container -@@ -126,16 +175,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **config** – the container’s configuration -+ - **config** – the container’s configuration - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **406** – impossible to attach (container not running) -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **406** – impossible to attach (container not running) -+ - **500** – server error - --### Inspect a container: -+#### [Inspect a container](#id7) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/json`{.descname} - : Return low-level information on the container `id`{.docutils -@@ -198,11 +247,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Inspect changes on a container’s filesystem:] -+#### [Inspect changes on a container’s filesystem](#id8) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/changes`{.descname} - : Inspect changes on container `id` ‘s filesystem -@@ -233,11 +282,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Export a container: -+#### [Export a container](#id9) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/export`{.descname} - : Export the contents of container `id` -@@ -255,11 +304,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Start a container: -+#### [Start a container](#id10) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/start`{.descname} - : Start the container `id` -@@ -274,11 +323,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Stop a container: -+#### [Stop a container](#id11) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/stop`{.descname} - : Stop the container `id` -@@ -295,15 +344,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Restart a container: -+#### [Restart a container](#id12) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/restart`{.descname} - : Restart the container `id` -@@ -320,15 +369,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container 
-+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Kill a container: -+#### [Kill a container](#id13) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/kill`{.descname} - : Kill the container `id` -@@ -343,11 +392,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Attach to a container: -+#### [Attach to a container](#id14) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/attach`{.descname} - : Attach to the container `id` -@@ -367,25 +416,25 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **logs** – 1/True/true or 0/False/false, return logs. Default -+ - **logs** – 1/True/true or 0/False/false, return logs. Default - false -- - **stream** – 1/True/true or 0/False/false, return stream. -+ - **stream** – 1/True/true or 0/False/false, return stream. - Default false -- - **stdin** – 1/True/true or 0/False/false, if stream=true, attach -+ - **stdin** – 1/True/true or 0/False/false, if stream=true, attach - to stdin. Default false -- - **stdout** – 1/True/true or 0/False/false, if logs=true, return -+ - **stdout** – 1/True/true or 0/False/false, if logs=true, return - stdout log, if stream=true, attach to stdout. Default false -- - **stderr** – 1/True/true or 0/False/false, if logs=true, return -+ - **stderr** – 1/True/true or 0/False/false, if logs=true, return - stderr log, if stream=true, attach to stderr. Default false - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --### Wait a container: -+#### [Wait a container](#id15) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/wait`{.descname} - : Block until container `id` stops, then returns -@@ -404,11 +453,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Remove a container: -+#### [Remove a container](#id16) - - `DELETE `{.descname}`/containers/`{.descname}(*id*) - : Remove the container `id` from the filesystem -@@ -425,19 +474,19 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **v** – 1/True/true or 0/False/false, Remove the volumes -+ - **v** – 1/True/true or 0/False/false, Remove the volumes - associated to the container. Default false - - Status Codes: - -- - **204** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --## Images -+### [2.2 Images](#id17) - --### List images: -+#### [List Images](#id18) - - `GET `{.descname}`/images/`{.descname}(*format*) - : List images `format` could be json or viz (json -@@ -498,16 +547,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. 
-+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create an image: -+#### [Create an image](#id19) - - `POST `{.descname}`/images/create`{.descname} - : Create an image, either by pull it from the registry or by importing -@@ -531,18 +580,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **fromImage** – name of the image to pull -- - **fromSrc** – source to import, - means stdin -- - **repo** – repository -- - **tag** – tag -- - **registry** – the registry to pull from -+ - **fromImage** – name of the image to pull -+ - **fromSrc** – source to import, - means stdin -+ - **repo** – repository -+ - **tag** – tag -+ - **registry** – the registry to pull from - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Insert a file in an image: -+#### [Insert a file in an image](#id20) - - `POST `{.descname}`/images/`{.descname}(*name*)`/insert`{.descname} - : Insert a file from `url` in the image -@@ -564,10 +613,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Inspect an image: -+#### [Inspect an image](#id21) - - `GET `{.descname}`/images/`{.descname}(*name*)`/json`{.descname} - : Return low-level information on the image `name` -@@ -610,11 +659,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Get the history of an image: -+#### [Get the history of an image](#id22) - - `GET `{.descname}`/images/`{.descname}(*name*)`/history`{.descname} - : Return the history of the image `name` -@@ -643,11 +692,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Push an image on the registry: -+#### [Push an image on the registry](#id23) - - `POST `{.descname}`/images/`{.descname}(*name*)`/push`{.descname} - : Push the image `name` on the registry -@@ -670,15 +719,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **registry** – the registry you wan to push, optional -+ - **registry** – the registry you wan to push, optional - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Tag an image into a repository: -+#### [Tag an image into a repository](#id24) - - `POST `{.descname}`/images/`{.descname}(*name*)`/tag`{.descname} - : Tag the image `name` into a repository -@@ -695,18 +744,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **repo** – The repository to tag in -- - **force** – 1/True/true or 0/False/false, default false -+ - **repo** – The repository to tag in -+ - **force** – 1/True/true or 0/False/false, default false - - Status Codes: - -- - **201** – no error -- - **400** – bad parameter -- - **404** – no such image -- - **409** – conflict -- - 
**500** – server error -+ - **201** – no error -+ - **400** – bad parameter -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Remove an image: -+#### [Remove an image](#id25) - - `DELETE `{.descname}`/images/`{.descname}(*name*) - : Remove the image `name` from the filesystem -@@ -721,11 +770,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **204** – no error -- - **404** – no such image -- - **500** – server error -+ - **204** – no error -+ - **404** – no such image -+ - **500** – server error - --### Search images: -+#### [Search images](#id26) - - `GET `{.descname}`/images/search`{.descname} - : Search for an image in the docker index -@@ -758,9 +807,9 @@ page_keywords: API, Docker, rcli, REST, documentation - :statuscode 200: no error - :statuscode 500: server error - --## Misc -+### [2.3 Misc](#id27) - --### Build an image from Dockerfile via stdin: -+#### [Build an image from Dockerfile via stdin](#id28) - - `POST `{.descname}`/build`{.descname} - : Build an image from Dockerfile via stdin -@@ -781,15 +830,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – tag to be applied to the resulting image in case of -+ - **t** – tag to be applied to the resulting image in case of - success - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --#### [Get default username and email -+#### [Get default username and email](#id29) - - `GET `{.descname}`/auth`{.descname} - : Get the default username and email -@@ -810,10 +859,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Check auth configuration: and store it -+#### [Check auth configuration and store it](#id30) - - `POST `{.descname}`/auth`{.descname} - : Get the default username and email -@@ -835,11 +884,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **204** – no error -- - **500** – server error -+ - **200** – no error -+ - **204** – no error -+ - **500** – server error - --### Display system-wide information: -+#### [Display system-wide information](#id31) - - `GET `{.descname}`/info`{.descname} - : Display system-wide information -@@ -865,10 +914,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Show the docker version information: -+#### [Show the docker version information](#id32) - - `GET `{.descname}`/version`{.descname} - : Show the docker version information -@@ -890,10 +939,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Create a new image from a container’s changes: -+#### [Create a new image from a container’s changes](#id33) - - `POST `{.descname}`/commit`{.descname} - : Create a new image from a container’s changes -@@ -919,41 +968,41 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **container** – source container -- - **repo** – repository -- - **tag** – tag -- - **m** – commit message -- - **author** – author (eg. 
“John Hannibal Smith -+ - **container** – source container -+ - **repo** – repository -+ - **tag** – tag -+ - **m** – commit message -+ - **author** – author (eg. “John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>”) - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **500** – server error - --## Going Further -+## [3. Going further](#id34) - --### Inside ‘docker run’ -+### [3.1 Inside ‘docker run’](#id35) - - Here are the steps of ‘docker run’ : - --- Create the container -+- Create the container - --- If the status code is 404, it means the image doesn’t exists: -- : - Try to pull it -- - Then retry to create the container -+- If the status code is 404, it means the image doesn’t exists: -+ : - Try to pull it -+ - Then retry to create the container - --- Start the container -+- Start the container - --- If you are not in detached mode: -- : - Attach to the container, using logs=1 (to have stdout and -+- If you are not in detached mode: -+ : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 - --- If in detached mode or only stdin is attached: -- : - Display the container’s id -+- If in detached mode or only stdin is attached: -+ : - Display the container’s id - --### Hijacking -+### [3.2 Hijacking](#id36) - - In this version of the API, /attach uses hijacking to transport stdin, - stdout and stderr on the same socket. This might change in the future. -diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md -index b6aa5bc..2a99f72 100644 ---- a/docs/sources/reference/api/docker_remote_api_v1.10.md -+++ b/docs/sources/reference/api/docker_remote_api_v1.10.md -@@ -2,24 +2,80 @@ page_title: Remote API v1.10 - page_description: API Documentation for Docker - page_keywords: API, Docker, rcli, REST, documentation - --# Docker Remote API v1.10 -- --## Introduction -- --- The Remote API has replaced rcli --- The daemon listens on `unix:///var/run/docker.sock`{.docutils -+# [Docker Remote API v1.10](#id1) -+ -+Table of Contents -+ -+- [Docker Remote API v1.10](#docker-remote-api-v1-10) -+ - [1. Brief introduction](#brief-introduction) -+ - [2. 
Endpoints](#endpoints) -+ - [2.1 Containers](#containers) -+ - [List containers](#list-containers) -+ - [Create a container](#create-a-container) -+ - [Inspect a container](#inspect-a-container) -+ - [List processes running inside a -+ container](#list-processes-running-inside-a-container) -+ - [Inspect changes on a container’s -+ filesystem](#inspect-changes-on-a-container-s-filesystem) -+ - [Export a container](#export-a-container) -+ - [Start a container](#start-a-container) -+ - [Stop a container](#stop-a-container) -+ - [Restart a container](#restart-a-container) -+ - [Kill a container](#kill-a-container) -+ - [Attach to a container](#attach-to-a-container) -+ - [Wait a container](#wait-a-container) -+ - [Remove a container](#remove-a-container) -+ - [Copy files or folders from a -+ container](#copy-files-or-folders-from-a-container) -+ - [2.2 Images](#images) -+ - [List Images](#list-images) -+ - [Create an image](#create-an-image) -+ - [Insert a file in an image](#insert-a-file-in-an-image) -+ - [Inspect an image](#inspect-an-image) -+ - [Get the history of an -+ image](#get-the-history-of-an-image) -+ - [Push an image on the -+ registry](#push-an-image-on-the-registry) -+ - [Tag an image into a -+ repository](#tag-an-image-into-a-repository) -+ - [Remove an image](#remove-an-image) -+ - [Search images](#search-images) -+ - [2.3 Misc](#misc) -+ - [Build an image from Dockerfile via -+ stdin](#build-an-image-from-dockerfile-via-stdin) -+ - [Check auth configuration](#check-auth-configuration) -+ - [Display system-wide -+ information](#display-system-wide-information) -+ - [Show the docker version -+ information](#show-the-docker-version-information) -+ - [Create a new image from a container’s -+ changes](#create-a-new-image-from-a-container-s-changes) -+ - [Monitor Docker’s events](#monitor-docker-s-events) -+ - [Get a tarball containing all images and tags in a -+ repository](#get-a-tarball-containing-all-images-and-tags-in-a-repository) -+ - [Load a tarball with a set of images and tags into -+ docker](#load-a-tarball-with-a-set-of-images-and-tags-into-docker) -+ - [3. Going further](#going-further) -+ - [3.1 Inside ‘docker run’](#inside-docker-run) -+ - [3.2 Hijacking](#hijacking) -+ - [3.3 CORS Requests](#cors-requests) -+ -+## [1. Brief introduction](#id2) -+ -+- The Remote API has replaced rcli -+- The daemon listens on `unix:///var/run/docker.sock`{.docutils - .literal}, but you can [*Bind Docker to another host/port or a Unix - socket*](../../../use/basics/#bind-docker). --- The API tends to be REST, but for some complex commands, like -+- The API tends to be REST, but for some complex commands, like - `attach` or `pull`{.docutils .literal}, the HTTP - connection is hijacked to transport `stdout, stdin`{.docutils - .literal} and `stderr` - --## Endpoints -+## [2. Endpoints](#id3) - --### Containers -+### [2.1 Containers](#id4) - --### List containers: -+#### [List containers](#id5) - - `GET `{.descname}`/containers/json`{.descname} - : List containers -@@ -80,24 +136,24 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default -- - **limit** – Show `limit` last created -+ - **limit** – Show `limit` last created - containers, include non-running ones. 
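Since this version of the daemon is documented above as listening on `unix:///var/run/docker.sock` by default, the sketch below shows one way a Go client might reach the API over that socket; the socket path, the dummy host name in the URL, and the daemon being up are assumptions made only for illustration.

    // Minimal sketch: query the Remote API over the daemon's unix socket.
    package main

    import (
        "fmt"
        "io/ioutil"
        "net"
        "net/http"
    )

    func main() {
        // Route every request through the unix socket instead of TCP.
        transport := &http.Transport{
            Dial: func(proto, addr string) (net.Conn, error) {
                return net.Dial("unix", "/var/run/docker.sock")
            },
        }
        client := &http.Client{Transport: transport}

        // The host portion of the URL is only a placeholder once the
        // custom Dial above is in place.
        resp, err := client.Get("http://docker/containers/json?all=1")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            panic(err)
        }
        fmt.Printf("HTTP %d\n%s\n", resp.StatusCode, body)
    }
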
-- - **since** – Show only containers created since Id, include -+ - **since** – Show only containers created since Id, include - non-running ones. -- - **before** – Show only containers created before Id, include -+ - **before** – Show only containers created before Id, include - non-running ones. -- - **size** – 1/True/true or 0/False/false, Show the containers -+ - **size** – 1/True/true or 0/False/false, Show the containers - sizes - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create a container: -+#### [Create a container](#id6) - - `POST `{.descname}`/containers/create`{.descname} - : Create a container -@@ -130,6 +186,7 @@ page_keywords: API, Docker, rcli, REST, documentation - }, - "VolumesFrom":"", - "WorkingDir":"", -+ "DisableNetwork": false, - "ExposedPorts":{ - "22/tcp": {} - } -@@ -149,23 +206,23 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **config** – the container’s configuration -+ - **config** – the container’s configuration - - Query Parameters: - -   - -- - **name** – Assign the specified name to the container. Must -+ - **name** – Assign the specified name to the container. Must - match `/?[a-zA-Z0-9_-]+`. - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **406** – impossible to attach (container not running) -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **406** – impossible to attach (container not running) -+ - **500** – server error - --### Inspect a container: -+#### [Inspect a container](#id7) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/json`{.descname} - : Return low-level information on the container `id`{.docutils -@@ -246,11 +303,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### List processes running inside a container: -+#### [List processes running inside a container](#id8) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/top`{.descname} - : List processes running inside the container `id` -@@ -288,15 +345,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **ps\_args** – ps arguments to use (eg. aux) -+ - **ps\_args** – ps arguments to use (eg. 
aux) - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Inspect changes on a container’s filesystem: -+#### [Inspect changes on a container’s filesystem](#id9) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/changes`{.descname} - : Inspect changes on container `id` ‘s filesystem -@@ -327,11 +384,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Export a container: -+#### [Export a container](#id10) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/export`{.descname} - : Export the contents of container `id` -@@ -349,11 +406,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Start a container: -+#### [Start a container](#id11) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/start`{.descname} - : Start the container `id` -@@ -380,15 +437,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **hostConfig** – the container’s host configuration (optional) -+ - **hostConfig** – the container’s host configuration (optional) - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Stop a container: -+#### [Stop a container](#id12) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/stop`{.descname} - : Stop the container `id` -@@ -405,15 +462,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Restart a container: -+#### [Restart a container](#id13) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/restart`{.descname} - : Restart the container `id` -@@ -430,15 +487,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Kill a container: -+#### [Kill a container](#id14) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/kill`{.descname} - : Kill the container `id` -@@ -453,11 +510,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Attach to a container: -+#### [Attach to a container](#id15) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/attach`{.descname} - : Attach to the container `id` -@@ -477,23 +534,23 @@ page_keywords: API, Docker, rcli, REST, documentation - -  
 - -- - **logs** – 1/True/true or 0/False/false, return logs. Default -+ - **logs** – 1/True/true or 0/False/false, return logs. Default - false -- - **stream** – 1/True/true or 0/False/false, return stream. -+ - **stream** – 1/True/true or 0/False/false, return stream. - Default false -- - **stdin** – 1/True/true or 0/False/false, if stream=true, attach -+ - **stdin** – 1/True/true or 0/False/false, if stream=true, attach - to stdin. Default false -- - **stdout** – 1/True/true or 0/False/false, if logs=true, return -+ - **stdout** – 1/True/true or 0/False/false, if logs=true, return - stdout log, if stream=true, attach to stdout. Default false -- - **stderr** – 1/True/true or 0/False/false, if logs=true, return -+ - **stderr** – 1/True/true or 0/False/false, if logs=true, return - stderr log, if stream=true, attach to stderr. Default false - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - - **Stream details**: - -@@ -518,9 +575,9 @@ page_keywords: API, Docker, rcli, REST, documentation - - `STREAM_TYPE` can be: - -- - 0: stdin (will be writen on stdout) -- - 1: stdout -- - 2: stderr -+ - 0: stdin (will be writen on stdout) -+ - 1: stdout -+ - 2: stderr - - `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of - the uint32 size encoded as big endian. -@@ -539,7 +596,7 @@ page_keywords: API, Docker, rcli, REST, documentation - 4. Read the extracted size and output it on the correct output - 5. Goto 1) - --### Wait a container: -+#### [Wait a container](#id16) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/wait`{.descname} - : Block until container `id` stops, then returns -@@ -558,11 +615,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Remove a container: -+#### [Remove a container](#id17) - - `DELETE `{.descname}`/containers/`{.descname}(*id*) - : Remove the container `id` from the filesystem -@@ -579,17 +636,19 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **v** – 1/True/true or 0/False/false, Remove the volumes -+ - **v** – 1/True/true or 0/False/false, Remove the volumes - associated to the container. Default false -+ - **force** – 1/True/true or 0/False/false, Removes the container -+ even if it was running. 
Default false - - Status Codes: - -- - **204** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --### Copy files or folders from a container: -+#### [Copy files or folders from a container](#id18) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/copy`{.descname} - : Copy files or folders of container `id` -@@ -612,13 +671,13 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --## Images -+### [2.2 Images](#id19) - --### List images: -+#### [List Images](#id20) - - `GET `{.descname}`/images/json`{.descname} - : **Example request**: -@@ -655,7 +714,7 @@ page_keywords: API, Docker, rcli, REST, documentation - } - ] - --### Create an image: -+#### [Create an image](#id21) - - `POST `{.descname}`/images/create`{.descname} - : Create an image, either by pull it from the registry or by importing -@@ -683,24 +742,24 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **fromImage** – name of the image to pull -- - **fromSrc** – source to import, - means stdin -- - **repo** – repository -- - **tag** – tag -- - **registry** – the registry to pull from -+ - **fromImage** – name of the image to pull -+ - **fromSrc** – source to import, - means stdin -+ - **repo** – repository -+ - **tag** – tag -+ - **registry** – the registry to pull from - - Request Headers: - -   - -- - **X-Registry-Auth** – base64-encoded AuthConfig object -+ - **X-Registry-Auth** – base64-encoded AuthConfig object - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Insert a file in an image: -+#### [Insert a file in an image](#id22) - - `POST `{.descname}`/images/`{.descname}(*name*)`/insert`{.descname} - : Insert a file from `url` in the image -@@ -722,10 +781,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Inspect an image: -+#### [Inspect an image](#id23) - - `GET `{.descname}`/images/`{.descname}(*name*)`/json`{.descname} - : Return low-level information on the image `name` -@@ -770,11 +829,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Get the history of an image: -+#### [Get the history of an image](#id24) - - `GET `{.descname}`/images/`{.descname}(*name*)`/history`{.descname} - : Return the history of the image `name` -@@ -803,11 +862,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Push an image on the registry: -+#### [Push an image on the registry](#id25) - - `POST `{.descname}`/images/`{.descname}(*name*)`/push`{.descname} - : Push the image `name` on the registry -@@ -830,22 +889,22 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **registry** – the registry you wan to push, optional -+ - 
**registry** – the registry you wan to push, optional - - Request Headers: - -   - -- - **X-Registry-Auth** – include a base64-encoded AuthConfig -+ - **X-Registry-Auth** – include a base64-encoded AuthConfig - object. - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Tag an image into a repository: -+#### [Tag an image into a repository](#id26) - - `POST `{.descname}`/images/`{.descname}(*name*)`/tag`{.descname} - : Tag the image `name` into a repository -@@ -862,18 +921,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **repo** – The repository to tag in -- - **force** – 1/True/true or 0/False/false, default false -+ - **repo** – The repository to tag in -+ - **force** – 1/True/true or 0/False/false, default false - - Status Codes: - -- - **201** – no error -- - **400** – bad parameter -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **201** – no error -+ - **400** – bad parameter -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Remove an image: -+#### [Remove an image](#id27) - - `DELETE `{.descname}`/images/`{.descname}(*name*) - : Remove the image `name` from the filesystem -@@ -897,16 +956,17 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **force** – 1/True/true or 0/False/false, default false -+ - **force** – 1/True/true or 0/False/false, default false -+ - **noprune** – 1/True/true or 0/False/false, default false - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Search images: -+#### [Search images](#id28) - - `GET `{.descname}`/images/search`{.descname} - : Search for an image in the docker index. -@@ -954,16 +1014,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **term** – term to search -+ - **term** – term to search - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --## Misc -+### [2.3 Misc](#id29) - --### Build an image from Dockerfile via stdin: -+#### [Build an image from Dockerfile via stdin](#id30) - - `POST `{.descname}`/build`{.descname} - : Build an image from Dockerfile via stdin -@@ -995,25 +1055,25 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – repository name (and optionally a tag) to be applied to -+ - **t** – repository name (and optionally a tag) to be applied to - the resulting image in case of success -- - **q** – suppress verbose build output -- - **nocache** – do not use the cache when building the image -+ - **q** – suppress verbose build output -+ - **nocache** – do not use the cache when building the image - - Request Headers: - -   - -- - **Content-type** – should be set to -+ - **Content-type** – should be set to - `"application/tar"`. 
-- - **X-Registry-Config** – base64-encoded ConfigFile object -+ - **X-Registry-Config** – base64-encoded ConfigFile object - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Check auth configuration: -+#### [Check auth configuration](#id31) - - `POST `{.descname}`/auth`{.descname} - : Get the default username and email -@@ -1036,11 +1096,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **204** – no error -- - **500** – server error -+ - **200** – no error -+ - **204** – no error -+ - **500** – server error - --### Display system-wide information: -+#### [Display system-wide information](#id32) - - `GET `{.descname}`/info`{.descname} - : Display system-wide information -@@ -1067,10 +1127,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Show the docker version information: -+#### [Show the docker version information](#id33) - - `GET `{.descname}`/version`{.descname} - : Show the docker version information -@@ -1092,10 +1152,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Create a new image from a container’s changes: -+#### [Create a new image from a container’s changes](#id34) - - `POST `{.descname}`/commit`{.descname} - : Create a new image from a container’s changes -@@ -1115,22 +1175,22 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **container** – source container -- - **repo** – repository -- - **tag** – tag -- - **m** – commit message -- - **author** – author (eg. “John Hannibal Smith -+ - **container** – source container -+ - **repo** – repository -+ - **tag** – tag -+ - **m** – commit message -+ - **author** – author (eg. “John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>”) -- - **run** – config automatically applied when the image is run. -+ - **run** – config automatically applied when the image is run. 
- (ex: {“Cmd”: [“cat”, “/world”], “PortSpecs”:[“22”]}) - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **500** – server error - --### Monitor Docker’s events: -+#### [Monitor Docker’s events](#id35) - - `GET `{.descname}`/events`{.descname} - : Get events from docker, either in real time via streaming, or via -@@ -1154,14 +1214,14 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **since** – timestamp used for polling -+ - **since** – timestamp used for polling - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Get a tarball containing all images and tags in a repository: -+#### [Get a tarball containing all images and tags in a repository](#id36) - - `GET `{.descname}`/images/`{.descname}(*name*)`/get`{.descname} - : Get a tarball containing all images and metadata for the repository -@@ -1180,10 +1240,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Load a tarball with a set of images and tags into docker: -+#### [Load a tarball with a set of images and tags into docker](#id37) - - `POST `{.descname}`/images/load`{.descname} - : Load a set of images and tags into the docker repository. -@@ -1200,38 +1260,38 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --## Going Further -+## [3. Going further](#id38) - --### Inside ‘docker run’ -+### [3.1 Inside ‘docker run’](#id39) - - Here are the steps of ‘docker run’ : - --- Create the container -+- Create the container - --- If the status code is 404, it means the image doesn’t exists: -- : - Try to pull it -- - Then retry to create the container -+- If the status code is 404, it means the image doesn’t exists: -+ : - Try to pull it -+ - Then retry to create the container - --- Start the container -+- Start the container - --- If you are not in detached mode: -- : - Attach to the container, using logs=1 (to have stdout and -+- If you are not in detached mode: -+ : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 - --- If in detached mode or only stdin is attached: -- : - Display the container’s id -+- If in detached mode or only stdin is attached: -+ : - Display the container’s id - --### Hijacking -+### [3.2 Hijacking](#id40) - - In this version of the API, /attach, uses hijacking to transport stdin, - stdout and stderr on the same socket. This might change in the future. - --### CORS Requests -+### [3.3 CORS Requests](#id41) - - To enable cross origin requests to the remote api add the flag --“-api-enable-cors” when running docker in daemon mode. -+“–api-enable-cors” when running docker in daemon mode. 
- -- docker -d -H="192.168.1.9:4243" -api-enable-cors -+ docker -d -H="192.168.1.9:4243" --api-enable-cors -diff --git a/docs/sources/reference/api/docker_remote_api_v1.2.md b/docs/sources/reference/api/docker_remote_api_v1.2.md -index 5a70c94..b11bce6 100644 ---- a/docs/sources/reference/api/docker_remote_api_v1.2.md -+++ b/docs/sources/reference/api/docker_remote_api_v1.2.md -@@ -2,21 +2,68 @@ page_title: Remote API v1.2 - page_description: API Documentation for Docker - page_keywords: API, Docker, rcli, REST, documentation - --# Docker Remote API v1.2 -- --## Introduction -- --- The Remote API is replacing rcli --- Default port in the docker daemon is 4243 --- The API tends to be REST, but for some complex commands, like attach -+# [Docker Remote API v1.2](#id1) -+ -+Table of Contents -+ -+- [Docker Remote API v1.2](#docker-remote-api-v1-2) -+ - [1. Brief introduction](#brief-introduction) -+ - [2. Endpoints](#endpoints) -+ - [2.1 Containers](#containers) -+ - [List containers](#list-containers) -+ - [Create a container](#create-a-container) -+ - [Inspect a container](#inspect-a-container) -+ - [Inspect changes on a container’s -+ filesystem](#inspect-changes-on-a-container-s-filesystem) -+ - [Export a container](#export-a-container) -+ - [Start a container](#start-a-container) -+ - [Stop a container](#stop-a-container) -+ - [Restart a container](#restart-a-container) -+ - [Kill a container](#kill-a-container) -+ - [Attach to a container](#attach-to-a-container) -+ - [Wait a container](#wait-a-container) -+ - [Remove a container](#remove-a-container) -+ - [2.2 Images](#images) -+ - [List Images](#list-images) -+ - [Create an image](#create-an-image) -+ - [Insert a file in an image](#insert-a-file-in-an-image) -+ - [Inspect an image](#inspect-an-image) -+ - [Get the history of an -+ image](#get-the-history-of-an-image) -+ - [Push an image on the -+ registry](#push-an-image-on-the-registry) -+ - [Tag an image into a -+ repository](#tag-an-image-into-a-repository) -+ - [Remove an image](#remove-an-image) -+ - [Search images](#search-images) -+ - [2.3 Misc](#misc) -+ - [Build an image from Dockerfile via -+ stdin](#build-an-image-from-dockerfile-via-stdin) -+ - [Check auth configuration](#check-auth-configuration) -+ - [Display system-wide -+ information](#display-system-wide-information) -+ - [Show the docker version -+ information](#show-the-docker-version-information) -+ - [Create a new image from a container’s -+ changes](#create-a-new-image-from-a-container-s-changes) -+ - [3. Going further](#going-further) -+ - [3.1 Inside ‘docker run’](#inside-docker-run) -+ - [3.2 Hijacking](#hijacking) -+ - [3.3 CORS Requests](#cors-requests) -+ -+## [1. Brief introduction](#id2) -+ -+- The Remote API is replacing rcli -+- Default port in the docker daemon is 4243 -+- The API tends to be REST, but for some complex commands, like attach - or pull, the HTTP connection is hijacked to transport stdout stdin - and stderr - --## Endpoints -+## [2. Endpoints](#id3) - --### Containers -+### [2.1 Containers](#id4) - --### List containers: -+#### [List containers](#id5) - - `GET `{.descname}`/containers/json`{.descname} - : List containers -@@ -77,22 +124,22 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. 
- Only running containers are shown by default -- - **limit** – Show `limit` last created -+ - **limit** – Show `limit` last created - containers, include non-running ones. -- - **since** – Show only containers created since Id, include -+ - **since** – Show only containers created since Id, include - non-running ones. -- - **before** – Show only containers created before Id, include -+ - **before** – Show only containers created before Id, include - non-running ones. - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create a container: -+#### [Create a container](#id6) - - `POST `{.descname}`/containers/create`{.descname} - : Create a container -@@ -138,16 +185,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **config** – the container’s configuration -+ - **config** – the container’s configuration - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **406** – impossible to attach (container not running) -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **406** – impossible to attach (container not running) -+ - **500** – server error - --### Inspect a container: -+#### [Inspect a container](#id7) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/json`{.descname} - : Return low-level information on the container `id`{.docutils -@@ -210,11 +257,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Inspect changes on a container’s filesystem:] -+#### [Inspect changes on a container’s filesystem](#id8) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/changes`{.descname} - : Inspect changes on container `id` ‘s filesystem -@@ -245,11 +292,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Export a container: -+#### [Export a container](#id9) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/export`{.descname} - : Export the contents of container `id` -@@ -267,11 +314,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Start a container: -+#### [Start a container](#id10) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/start`{.descname} - : Start the container `id` -@@ -286,11 +333,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Stop a container: -+#### [Stop a container](#id11) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/stop`{.descname} - : Stop the container `id` -@@ -307,15 +354,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no 
error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Restart a container: -+#### [Restart a container](#id12) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/restart`{.descname} - : Restart the container `id` -@@ -332,15 +379,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Kill a container: -+#### [Kill a container](#id13) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/kill`{.descname} - : Kill the container `id` -@@ -355,11 +402,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Attach to a container: -+#### [Attach to a container](#id14) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/attach`{.descname} - : Attach to the container `id` -@@ -379,25 +426,25 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **logs** – 1/True/true or 0/False/false, return logs. Default -+ - **logs** – 1/True/true or 0/False/false, return logs. Default - false -- - **stream** – 1/True/true or 0/False/false, return stream. -+ - **stream** – 1/True/true or 0/False/false, return stream. - Default false -- - **stdin** – 1/True/true or 0/False/false, if stream=true, attach -+ - **stdin** – 1/True/true or 0/False/false, if stream=true, attach - to stdin. Default false -- - **stdout** – 1/True/true or 0/False/false, if logs=true, return -+ - **stdout** – 1/True/true or 0/False/false, if logs=true, return - stdout log, if stream=true, attach to stdout. Default false -- - **stderr** – 1/True/true or 0/False/false, if logs=true, return -+ - **stderr** – 1/True/true or 0/False/false, if logs=true, return - stderr log, if stream=true, attach to stderr. Default false - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --### Wait a container: -+#### [Wait a container](#id15) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/wait`{.descname} - : Block until container `id` stops, then returns -@@ -416,11 +463,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Remove a container: -+#### [Remove a container](#id16) - - `DELETE `{.descname}`/containers/`{.descname}(*id*) - : Remove the container `id` from the filesystem -@@ -437,19 +484,19 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **v** – 1/True/true or 0/False/false, Remove the volumes -+ - **v** – 1/True/true or 0/False/false, Remove the volumes - associated to the container. 
Default false - - Status Codes: - -- - **204** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --## Images -+### [2.2 Images](#id17) - --### List images: -+#### [List Images](#id18) - - `GET `{.descname}`/images/`{.descname}(*format*) - : List images `format` could be json or viz (json -@@ -514,16 +561,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create an image: -+#### [Create an image](#id19) - - `POST `{.descname}`/images/create`{.descname} - : Create an image, either by pull it from the registry or by importing -@@ -547,18 +594,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **fromImage** – name of the image to pull -- - **fromSrc** – source to import, - means stdin -- - **repo** – repository -- - **tag** – tag -- - **registry** – the registry to pull from -+ - **fromImage** – name of the image to pull -+ - **fromSrc** – source to import, - means stdin -+ - **repo** – repository -+ - **tag** – tag -+ - **registry** – the registry to pull from - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Insert a file in an image: -+#### [Insert a file in an image](#id20) - - `POST `{.descname}`/images/`{.descname}(*name*)`/insert`{.descname} - : Insert a file from `url` in the image -@@ -580,10 +627,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Inspect an image: -+#### [Inspect an image](#id21) - - `GET `{.descname}`/images/`{.descname}(*name*)`/json`{.descname} - : Return low-level information on the image `name` -@@ -627,11 +674,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Get the history of an image: -+#### [Get the history of an image](#id22) - - `GET `{.descname}`/images/`{.descname}(*name*)`/history`{.descname} - : Return the history of the image `name` -@@ -661,11 +708,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Push an image on the registry: -+#### [Push an image on the registry](#id23) - - `POST `{.descname}`/images/`{.descname}(*name*)`/push`{.descname} - : Push the image `name` on the registry -@@ -689,15 +736,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **registry** – the registry you wan to push, optional -+ - **registry** – the registry you wan to push, optional - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Tag an 
image into a repository: -+#### [Tag an image into a repository](#id24) - - `POST `{.descname}`/images/`{.descname}(*name*)`/tag`{.descname} - : Tag the image `name` into a repository -@@ -714,18 +761,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **repo** – The repository to tag in -- - **force** – 1/True/true or 0/False/false, default false -+ - **repo** – The repository to tag in -+ - **force** – 1/True/true or 0/False/false, default false - - Status Codes: - -- - **201** – no error -- - **400** – bad parameter -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **201** – no error -+ - **400** – bad parameter -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Remove an image: -+#### [Remove an image](#id25) - - `DELETE `{.descname}`/images/`{.descname}(*name*) - : Remove the image `name` from the filesystem -@@ -747,12 +794,12 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **204** – no error -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **204** – no error -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Search images: -+#### [Search images](#id26) - - `GET `{.descname}`/images/search`{.descname} - : Search for an image in the docker index -@@ -785,9 +832,9 @@ page_keywords: API, Docker, rcli, REST, documentation - :statuscode 200: no error - :statuscode 500: server error - --## Misc -+### [2.3 Misc](#id27) - --### Build an image from Dockerfile via stdin: -+#### [Build an image from Dockerfile via stdin](#id28) - - `POST `{.descname}`/build`{.descname} - : Build an image from Dockerfile -@@ -808,19 +855,19 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – repository name to be applied to the resulting image in -+ - **t** – repository name to be applied to the resulting image in - case of success -- - **remote** – resource to fetch, as URI -+ - **remote** – resource to fetch, as URI - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - - {{ STREAM }} is the raw text output of the build command. It uses the - HTTP Hijack method in order to stream. - --### Check auth configuration: -+#### [Check auth configuration](#id29) - - `POST `{.descname}`/auth`{.descname} - : Get the default username and email -@@ -847,13 +894,13 @@ HTTP Hijack method in order to stream. - - Status Codes: - -- - **200** – no error -- - **204** – no error -- - **401** – unauthorized -- - **403** – forbidden -- - **500** – server error -+ - **200** – no error -+ - **204** – no error -+ - **401** – unauthorized -+ - **403** – forbidden -+ - **500** – server error - --### Display system-wide information: -+#### [Display system-wide information](#id30) - - `GET `{.descname}`/info`{.descname} - : Display system-wide information -@@ -879,10 +926,10 @@ HTTP Hijack method in order to stream. - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Show the docker version information: -+#### [Show the docker version information](#id31) - - `GET `{.descname}`/version`{.descname} - : Show the docker version information -@@ -904,10 +951,10 @@ HTTP Hijack method in order to stream. 
- - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Create a new image from a container’s changes: -+#### [Create a new image from a container’s changes](#id32) - - `POST `{.descname}`/commit`{.descname} - : Create a new image from a container’s changes -@@ -933,49 +980,49 @@ HTTP Hijack method in order to stream. - -   - -- - **container** – source container -- - **repo** – repository -- - **tag** – tag -- - **m** – commit message -- - **author** – author (eg. “John Hannibal Smith -+ - **container** – source container -+ - **repo** – repository -+ - **tag** – tag -+ - **m** – commit message -+ - **author** – author (eg. “John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>”) - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **500** – server error - --## Going Further -+## [3. Going further](#id33) - --### Inside ‘docker run’ -+### [3.1 Inside ‘docker run’](#id34) - - Here are the steps of ‘docker run’ : - --- Create the container -+- Create the container - --- If the status code is 404, it means the image doesn’t exists: -- : - Try to pull it -- - Then retry to create the container -+- If the status code is 404, it means the image doesn’t exists: -+ : - Try to pull it -+ - Then retry to create the container - --- Start the container -+- Start the container - --- If you are not in detached mode: -- : - Attach to the container, using logs=1 (to have stdout and -+- If you are not in detached mode: -+ : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 - --- If in detached mode or only stdin is attached: -- : - Display the container’s id -+- If in detached mode or only stdin is attached: -+ : - Display the container’s id - --### Hijacking -+### [3.2 Hijacking](#id35) - - In this version of the API, /attach, uses hijacking to transport stdin, - stdout and stderr on the same socket. This might change in the future. - --### CORS Requests -+### [3.3 CORS Requests](#id36) - - To enable cross origin requests to the remote api add the flag --“-api-enable-cors” when running docker in daemon mode. -+“–api-enable-cors” when running docker in daemon mode. - - > docker -d -H=”[tcp://192.168.1.9:4243](tcp://192.168.1.9:4243)” --> -api-enable-cors -+> –api-enable-cors -diff --git a/docs/sources/reference/api/docker_remote_api_v1.3.md b/docs/sources/reference/api/docker_remote_api_v1.3.md -index 7e0e6bd..4203699 100644 ---- a/docs/sources/reference/api/docker_remote_api_v1.3.md -+++ b/docs/sources/reference/api/docker_remote_api_v1.3.md -@@ -2,74 +2,71 @@ page_title: Remote API v1.3 - page_description: API Documentation for Docker - page_keywords: API, Docker, rcli, REST, documentation - --# Docker Remote API v1.3 -+# [Docker Remote API v1.3](#id1) - - Table of Contents - --- [Docker Remote API v1.3](#docker-remote-api-v1-3) -- - [1. Brief introduction](#brief-introduction) -- - [2. Endpoints](#endpoints) -- - [2.1 Containers](#containers) -- - [List containers](#list-containers) -- - [Create a container](#create-a-container) -- - [Inspect a container](#inspect-a-container) -- - [List processes running inside a -+- [Docker Remote API v1.3](#docker-remote-api-v1-3) -+ - [1. Brief introduction](#brief-introduction) -+ - [2. 
Endpoints](#endpoints) -+ - [2.1 Containers](#containers) -+ - [List containers](#list-containers) -+ - [Create a container](#create-a-container) -+ - [Inspect a container](#inspect-a-container) -+ - [List processes running inside a - container](#list-processes-running-inside-a-container) -- - [Inspect changes on a container’s -+ - [Inspect changes on a container’s - filesystem](#inspect-changes-on-a-container-s-filesystem) -- - [Export a container](#export-a-container) -- - [Start a container](#start-a-container) -- - [Stop a container](#stop-a-container) -- - [Restart a container](#restart-a-container) -- - [Kill a container](#kill-a-container) -- - [Attach to a container](#attach-to-a-container) -- - [Wait a container](#wait-a-container) -- - [Remove a container](#remove-a-container) -- -- - [2.2 Images](#images) -- - [List Images](#list-images) -- - [Create an image](#create-an-image) -- - [Insert a file in an image](#insert-a-file-in-an-image) -- - [Inspect an image](#inspect-an-image) -- - [Get the history of an -+ - [Export a container](#export-a-container) -+ - [Start a container](#start-a-container) -+ - [Stop a container](#stop-a-container) -+ - [Restart a container](#restart-a-container) -+ - [Kill a container](#kill-a-container) -+ - [Attach to a container](#attach-to-a-container) -+ - [Wait a container](#wait-a-container) -+ - [Remove a container](#remove-a-container) -+ - [2.2 Images](#images) -+ - [List Images](#list-images) -+ - [Create an image](#create-an-image) -+ - [Insert a file in an image](#insert-a-file-in-an-image) -+ - [Inspect an image](#inspect-an-image) -+ - [Get the history of an - image](#get-the-history-of-an-image) -- - [Push an image on the -+ - [Push an image on the - registry](#push-an-image-on-the-registry) -- - [Tag an image into a -+ - [Tag an image into a - repository](#tag-an-image-into-a-repository) -- - [Remove an image](#remove-an-image) -- - [Search images](#search-images) -- -- - [2.3 Misc](#misc) -- - [Build an image from Dockerfile via -+ - [Remove an image](#remove-an-image) -+ - [Search images](#search-images) -+ - [2.3 Misc](#misc) -+ - [Build an image from Dockerfile via - stdin](#build-an-image-from-dockerfile-via-stdin) -- - [Check auth configuration](#check-auth-configuration) -- - [Display system-wide -+ - [Check auth configuration](#check-auth-configuration) -+ - [Display system-wide - information](#display-system-wide-information) -- - [Show the docker version -+ - [Show the docker version - information](#show-the-docker-version-information) -- - [Create a new image from a container’s -+ - [Create a new image from a container’s - changes](#create-a-new-image-from-a-container-s-changes) -- - [Monitor Docker’s events](#monitor-docker-s-events) -- -- - [3. Going further](#going-further) -- - [3.1 Inside ‘docker run’](#inside-docker-run) -- - [3.2 Hijacking](#hijacking) -- - [3.3 CORS Requests](#cors-requests) -+ - [Monitor Docker’s events](#monitor-docker-s-events) -+ - [3. Going further](#going-further) -+ - [3.1 Inside ‘docker run’](#inside-docker-run) -+ - [3.2 Hijacking](#hijacking) -+ - [3.3 CORS Requests](#cors-requests) - --## Introduction -+## [1. 
Brief introduction](#id2) - --- The Remote API is replacing rcli --- Default port in the docker daemon is 4243 --- The API tends to be REST, but for some complex commands, like attach -+- The Remote API is replacing rcli -+- Default port in the docker daemon is 4243 -+- The API tends to be REST, but for some complex commands, like attach - or pull, the HTTP connection is hijacked to transport stdout stdin - and stderr - --## Endpoints -+## [2. Endpoints](#id3) - --### Containers -+### [2.1 Containers](#id4) - --### List containers: -+#### [List containers](#id5) - - `GET `{.descname}`/containers/json`{.descname} - : List containers -@@ -130,24 +127,24 @@ Table of Contents - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default -- - **limit** – Show `limit` last created -+ - **limit** – Show `limit` last created - containers, include non-running ones. -- - **since** – Show only containers created since Id, include -+ - **since** – Show only containers created since Id, include - non-running ones. -- - **before** – Show only containers created before Id, include -+ - **before** – Show only containers created before Id, include - non-running ones. -- - **size** – 1/True/true or 0/False/false, Show the containers -+ - **size** – 1/True/true or 0/False/false, Show the containers - sizes - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create a container: -+#### [Create a container](#id6) - - `POST `{.descname}`/containers/create`{.descname} - : Create a container -@@ -193,16 +190,16 @@ Table of Contents - -   - -- - **config** – the container’s configuration -+ - **config** – the container’s configuration - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **406** – impossible to attach (container not running) -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **406** – impossible to attach (container not running) -+ - **500** – server error - --### Inspect a container: -+#### [Inspect a container](#id7) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/json`{.descname} - : Return low-level information on the container `id`{.docutils -@@ -265,11 +262,11 @@ Table of Contents - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### List processes running inside a container: -+#### [List processes running inside a container](#id8) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/top`{.descname} - : List processes running inside the container `id` -@@ -300,11 +297,11 @@ Table of Contents - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Inspect changes on a container’s filesystem: -+#### [Inspect changes on a container’s filesystem](#id9) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/changes`{.descname} - : Inspect changes on container `id` ‘s filesystem -@@ -335,11 +332,11 @@ Table of Contents - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - 
**500** – server error - --### Export a container: -+#### [Export a container](#id10) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/export`{.descname} - : Export the contents of container `id` -@@ -357,11 +354,11 @@ Table of Contents - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Start a container: -+#### [Start a container](#id11) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/start`{.descname} - : Start the container `id` -@@ -384,15 +381,15 @@ Table of Contents - -   - -- - **hostConfig** – the container’s host configuration (optional) -+ - **hostConfig** – the container’s host configuration (optional) - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Stop a container: -+#### [Stop a container](#id12) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/stop`{.descname} - : Stop the container `id` -@@ -409,15 +406,15 @@ Table of Contents - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Restart a container: -+#### [Restart a container](#id13) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/restart`{.descname} - : Restart the container `id` -@@ -434,15 +431,15 @@ Table of Contents - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Kill a container: -+#### [Kill a container](#id14) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/kill`{.descname} - : Kill the container `id` -@@ -457,11 +454,11 @@ Table of Contents - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Attach to a container: -+#### [Attach to a container](#id15) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/attach`{.descname} - : Attach to the container `id` -@@ -481,25 +478,25 @@ Table of Contents - -   - -- - **logs** – 1/True/true or 0/False/false, return logs. Default -+ - **logs** – 1/True/true or 0/False/false, return logs. Default - false -- - **stream** – 1/True/true or 0/False/false, return stream. -+ - **stream** – 1/True/true or 0/False/false, return stream. - Default false -- - **stdin** – 1/True/true or 0/False/false, if stream=true, attach -+ - **stdin** – 1/True/true or 0/False/false, if stream=true, attach - to stdin. Default false -- - **stdout** – 1/True/true or 0/False/false, if logs=true, return -+ - **stdout** – 1/True/true or 0/False/false, if logs=true, return - stdout log, if stream=true, attach to stdout. Default false -- - **stderr** – 1/True/true or 0/False/false, if logs=true, return -+ - **stderr** – 1/True/true or 0/False/false, if logs=true, return - stderr log, if stream=true, attach to stderr. 
Default false - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --### Wait a container: -+#### [Wait a container](#id16) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/wait`{.descname} - : Block until container `id` stops, then returns -@@ -518,11 +515,11 @@ Table of Contents - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Remove a container: -+#### [Remove a container](#id17) - - `DELETE `{.descname}`/containers/`{.descname}(*id*) - : Remove the container `id` from the filesystem -@@ -539,19 +536,19 @@ Table of Contents - -   - -- - **v** – 1/True/true or 0/False/false, Remove the volumes -+ - **v** – 1/True/true or 0/False/false, Remove the volumes - associated to the container. Default false - - Status Codes: - -- - **204** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --## Images -+### [2.2 Images](#id18) - --### List images: -+#### [List Images](#id19) - - `GET `{.descname}`/images/`{.descname}(*format*) - : List images `format` could be json or viz (json -@@ -616,16 +613,16 @@ Table of Contents - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create an image: -+#### [Create an image](#id20) - - `POST `{.descname}`/images/create`{.descname} - : Create an image, either by pull it from the registry or by importing -@@ -649,18 +646,18 @@ Table of Contents - -   - -- - **fromImage** – name of the image to pull -- - **fromSrc** – source to import, - means stdin -- - **repo** – repository -- - **tag** – tag -- - **registry** – the registry to pull from -+ - **fromImage** – name of the image to pull -+ - **fromSrc** – source to import, - means stdin -+ - **repo** – repository -+ - **tag** – tag -+ - **registry** – the registry to pull from - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Insert a file in an image: -+#### [Insert a file in an image](#id21) - - `POST `{.descname}`/images/`{.descname}(*name*)`/insert`{.descname} - : Insert a file from `url` in the image -@@ -682,10 +679,10 @@ Table of Contents - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Inspect an image: -+#### [Inspect an image](#id22) - - `GET `{.descname}`/images/`{.descname}(*name*)`/json`{.descname} - : Return low-level information on the image `name` -@@ -729,11 +726,11 @@ Table of Contents - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Get the history of an image: -+#### [Get the history of an image](#id23) - - `GET 
`{.descname}`/images/`{.descname}(*name*)`/history`{.descname} - : Return the history of the image `name` -@@ -762,11 +759,11 @@ Table of Contents - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Push an image on the registry: -+#### [Push an image on the registry](#id24) - - `POST `{.descname}`/images/`{.descname}(*name*)`/push`{.descname} - : Push the image `name` on the registry -@@ -790,15 +787,15 @@ Table of Contents - -   - -- - **registry** – the registry you wan to push, optional -+ - **registry** – the registry you wan to push, optional - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Tag an image into a repository: -+#### [Tag an image into a repository](#id25) - - `POST `{.descname}`/images/`{.descname}(*name*)`/tag`{.descname} - : Tag the image `name` into a repository -@@ -815,18 +812,18 @@ Table of Contents - -   - -- - **repo** – The repository to tag in -- - **force** – 1/True/true or 0/False/false, default false -+ - **repo** – The repository to tag in -+ - **force** – 1/True/true or 0/False/false, default false - - Status Codes: - -- - **201** – no error -- - **400** – bad parameter -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **201** – no error -+ - **400** – bad parameter -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Remove an image: -+#### [Remove an image](#id26) - - `DELETE `{.descname}`/images/`{.descname}(*name*) - : Remove the image `name` from the filesystem -@@ -848,12 +845,12 @@ Table of Contents - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Search images: -+#### [Search images](#id27) - - `GET `{.descname}`/images/search`{.descname} - : Search for an image in the docker index -@@ -886,9 +883,9 @@ Table of Contents - :statuscode 200: no error - :statuscode 500: server error - --## Misc -+### [2.3 Misc](#id28) - --### Build an image from Dockerfile via stdin: -+#### [Build an image from Dockerfile via stdin](#id29) - - `POST `{.descname}`/build`{.descname} - : Build an image from Dockerfile via stdin -@@ -917,16 +914,16 @@ Table of Contents - -   - -- - **t** – repository name (and optionally a tag) to be applied to -+ - **t** – repository name (and optionally a tag) to be applied to - the resulting image in case of success -- - **q** – suppress verbose build output -+ - **q** – suppress verbose build output - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Check auth configuration: -+#### [Check auth configuration](#id30) - - `POST `{.descname}`/auth`{.descname} - : Get the default username and email -@@ -948,11 +945,11 @@ Table of Contents - - Status Codes: - -- - **200** – no error -- - **204** – no error -- - **500** – server error -+ - **200** – no error -+ - **204** – no error -+ - **500** – server error - --### Display system-wide information: -+#### [Display system-wide information](#id31) - - `GET `{.descname}`/info`{.descname} - : Display system-wide information -@@ -981,10 +978,10 @@ Table of Contents - - Status Codes: - -- 
- **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Show the docker version information: -+#### [Show the docker version information](#id32) - - `GET `{.descname}`/version`{.descname} - : Show the docker version information -@@ -1006,10 +1003,10 @@ Table of Contents - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Create a new image from a container’s changes: -+#### [Create a new image from a container’s changes](#id33) - - `POST `{.descname}`/commit`{.descname} - : Create a new image from a container’s changes -@@ -1035,20 +1032,20 @@ Table of Contents - -   - -- - **container** – source container -- - **repo** – repository -- - **tag** – tag -- - **m** – commit message -- - **author** – author (eg. “John Hannibal Smith -+ - **container** – source container -+ - **repo** – repository -+ - **tag** – tag -+ - **m** – commit message -+ - **author** – author (eg. “John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>”) - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **500** – server error - --### Monitor Docker’s events: -+#### [Monitor Docker’s events](#id34) - - `GET `{.descname}`/events`{.descname} - : Get events from docker, either in real time via streaming, or via -@@ -1072,42 +1069,42 @@ Table of Contents - -   - -- - **since** – timestamp used for polling -+ - **since** – timestamp used for polling - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --## Going Further -+## [3. Going further](#id35) - --### Inside ‘docker run’ -+### [3.1 Inside ‘docker run’](#id36) - - Here are the steps of ‘docker run’ : - --- Create the container -+- Create the container - --- If the status code is 404, it means the image doesn’t exists: -- : - Try to pull it -- - Then retry to create the container -+- If the status code is 404, it means the image doesn’t exists: -+ : - Try to pull it -+ - Then retry to create the container - --- Start the container -+- Start the container - --- If you are not in detached mode: -- : - Attach to the container, using logs=1 (to have stdout and -+- If you are not in detached mode: -+ : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 - --- If in detached mode or only stdin is attached: -- : - Display the container’s id -+- If in detached mode or only stdin is attached: -+ : - Display the container’s id - --### Hijacking -+### [3.2 Hijacking](#id37) - - In this version of the API, /attach, uses hijacking to transport stdin, - stdout and stderr on the same socket. This might change in the future. - --### CORS Requests -+### [3.3 CORS Requests](#id38) - - To enable cross origin requests to the remote api add the flag --“-api-enable-cors” when running docker in daemon mode. -+“–api-enable-cors” when running docker in daemon mode. 
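The "Inside 'docker run'" steps restated above map directly onto the endpoints documented earlier: create the container, pull the image on a 404, retry the create, then start and attach. A rough Go sketch of the create-and-retry portion, assuming a local daemon on port 4243 and an illustrative JSON configuration whose field names are not taken from this patch, could be:

    package main

    import (
        "bytes"
        "fmt"
        "net/http"
    )

    // Assumed daemon address; the docs only state the default port, 4243.
    const daemon = "http://127.0.0.1:4243"

    // run sketches the first 'docker run' steps: create the container,
    // and on a 404 pull the image and retry the create call.
    func run(image string) error {
        // Minimal configuration payload; field names are illustrative only.
        config := fmt.Sprintf(`{"Image": %q, "Cmd": ["date"]}`, image)

        create := func() (*http.Response, error) {
            return http.Post(daemon+"/containers/create", "application/json",
                bytes.NewReader([]byte(config)))
        }

        resp, err := create()
        if err != nil {
            return err
        }
        resp.Body.Close()

        if resp.StatusCode == http.StatusNotFound {
            // The image is missing: try to pull it, then retry the create.
            pull, perr := http.Post(daemon+"/images/create?fromImage="+image, "text/plain", nil)
            if perr != nil {
                return perr
            }
            pull.Body.Close()

            if resp, err = create(); err != nil {
                return err
            }
            resp.Body.Close()
        }

        if resp.StatusCode != http.StatusCreated {
            return fmt.Errorf("create failed with HTTP %d", resp.StatusCode)
        }

        // Starting the container and attaching with logs=1 and stream=1
        // would follow here, as the list above describes.
        return nil
    }

    func main() {
        if err := run("base"); err != nil {
            fmt.Println(err)
        }
    }

Handling of the 406 and 500 codes listed above for container creation is omitted to keep the sketch short.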
- --> docker -d -H=”192.168.1.9:4243” -api-enable-cors -+> docker -d -H=”192.168.1.9:4243” –api-enable-cors -diff --git a/docs/sources/reference/api/docker_remote_api_v1.4.md b/docs/sources/reference/api/docker_remote_api_v1.4.md -index f665b1e..4eca2a6 100644 ---- a/docs/sources/reference/api/docker_remote_api_v1.4.md -+++ b/docs/sources/reference/api/docker_remote_api_v1.4.md -@@ -2,21 +2,73 @@ page_title: Remote API v1.4 - page_description: API Documentation for Docker - page_keywords: API, Docker, rcli, REST, documentation - --# Docker Remote API v1.4 -- --## Introduction -- --- The Remote API is replacing rcli --- Default port in the docker daemon is 4243 --- The API tends to be REST, but for some complex commands, like attach -+# [Docker Remote API v1.4](#id1) -+ -+Table of Contents -+ -+- [Docker Remote API v1.4](#docker-remote-api-v1-4) -+ - [1. Brief introduction](#brief-introduction) -+ - [2. Endpoints](#endpoints) -+ - [2.1 Containers](#containers) -+ - [List containers](#list-containers) -+ - [Create a container](#create-a-container) -+ - [Inspect a container](#inspect-a-container) -+ - [List processes running inside a -+ container](#list-processes-running-inside-a-container) -+ - [Inspect changes on a container’s -+ filesystem](#inspect-changes-on-a-container-s-filesystem) -+ - [Export a container](#export-a-container) -+ - [Start a container](#start-a-container) -+ - [Stop a container](#stop-a-container) -+ - [Restart a container](#restart-a-container) -+ - [Kill a container](#kill-a-container) -+ - [Attach to a container](#attach-to-a-container) -+ - [Wait a container](#wait-a-container) -+ - [Remove a container](#remove-a-container) -+ - [Copy files or folders from a -+ container](#copy-files-or-folders-from-a-container) -+ - [2.2 Images](#images) -+ - [List Images](#list-images) -+ - [Create an image](#create-an-image) -+ - [Insert a file in an image](#insert-a-file-in-an-image) -+ - [Inspect an image](#inspect-an-image) -+ - [Get the history of an -+ image](#get-the-history-of-an-image) -+ - [Push an image on the -+ registry](#push-an-image-on-the-registry) -+ - [Tag an image into a -+ repository](#tag-an-image-into-a-repository) -+ - [Remove an image](#remove-an-image) -+ - [Search images](#search-images) -+ - [2.3 Misc](#misc) -+ - [Build an image from Dockerfile via -+ stdin](#build-an-image-from-dockerfile-via-stdin) -+ - [Check auth configuration](#check-auth-configuration) -+ - [Display system-wide -+ information](#display-system-wide-information) -+ - [Show the docker version -+ information](#show-the-docker-version-information) -+ - [Create a new image from a container’s -+ changes](#create-a-new-image-from-a-container-s-changes) -+ - [Monitor Docker’s events](#monitor-docker-s-events) -+ - [3. Going further](#going-further) -+ - [3.1 Inside ‘docker run’](#inside-docker-run) -+ - [3.2 Hijacking](#hijacking) -+ - [3.3 CORS Requests](#cors-requests) -+ -+## [1. Brief introduction](#id2) -+ -+- The Remote API is replacing rcli -+- Default port in the docker daemon is 4243 -+- The API tends to be REST, but for some complex commands, like attach - or pull, the HTTP connection is hijacked to transport stdout stdin - and stderr - --## Endpoints -+## [2. 
Endpoints](#id3) - --### Containers -+### [2.1 Containers](#id4) - --### List containers: -+#### [List containers](#id5) - - `GET `{.descname}`/containers/json`{.descname} - : List containers -@@ -77,24 +129,24 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default -- - **limit** – Show `limit` last created -+ - **limit** – Show `limit` last created - containers, include non-running ones. -- - **since** – Show only containers created since Id, include -+ - **since** – Show only containers created since Id, include - non-running ones. -- - **before** – Show only containers created before Id, include -+ - **before** – Show only containers created before Id, include - non-running ones. -- - **size** – 1/True/true or 0/False/false, Show the containers -+ - **size** – 1/True/true or 0/False/false, Show the containers - sizes - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create a container: -+#### [Create a container](#id6) - - `POST `{.descname}`/containers/create`{.descname} - : Create a container -@@ -143,16 +195,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **config** – the container’s configuration -+ - **config** – the container’s configuration - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **406** – impossible to attach (container not running) -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **406** – impossible to attach (container not running) -+ - **500** – server error - --### Inspect a container: -+#### [Inspect a container](#id7) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/json`{.descname} - : Return low-level information on the container `id`{.docutils -@@ -217,12 +269,12 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **409** – conflict between containers and images -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **409** – conflict between containers and images -+ - **500** – server error - --### List processes running inside a container: -+#### [List processes running inside a container](#id8) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/top`{.descname} - : List processes running inside the container `id` -@@ -260,15 +312,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **ps\_args** – ps arguments to use (eg. aux) -+ - **ps\_args** – ps arguments to use (eg. 
aux) - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Inspect changes on a container’s filesystem: -+#### [Inspect changes on a container’s filesystem](#id9) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/changes`{.descname} - : Inspect changes on container `id` ‘s filesystem -@@ -299,11 +351,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Export a container: -+#### [Export a container](#id10) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/export`{.descname} - : Export the contents of container `id` -@@ -321,11 +373,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Start a container: -+#### [Start a container](#id11) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/start`{.descname} - : Start the container `id` -@@ -349,15 +401,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **hostConfig** – the container’s host configuration (optional) -+ - **hostConfig** – the container’s host configuration (optional) - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Stop a container: -+#### [Stop a container](#id12) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/stop`{.descname} - : Stop the container `id` -@@ -374,15 +426,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Restart a container: -+#### [Restart a container](#id13) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/restart`{.descname} - : Restart the container `id` -@@ -399,15 +451,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Kill a container: -+#### [Kill a container](#id14) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/kill`{.descname} - : Kill the container `id` -@@ -422,11 +474,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Attach to a container: -+#### [Attach to a container](#id15) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/attach`{.descname} - : Attach to the container `id` -@@ -446,25 +498,25 @@ page_keywords: API, Docker, rcli, REST, documentation - -  
 - -- - **logs** – 1/True/true or 0/False/false, return logs. Default -+ - **logs** – 1/True/true or 0/False/false, return logs. Default - false -- - **stream** – 1/True/true or 0/False/false, return stream. -+ - **stream** – 1/True/true or 0/False/false, return stream. - Default false -- - **stdin** – 1/True/true or 0/False/false, if stream=true, attach -+ - **stdin** – 1/True/true or 0/False/false, if stream=true, attach - to stdin. Default false -- - **stdout** – 1/True/true or 0/False/false, if logs=true, return -+ - **stdout** – 1/True/true or 0/False/false, if logs=true, return - stdout log, if stream=true, attach to stdout. Default false -- - **stderr** – 1/True/true or 0/False/false, if logs=true, return -+ - **stderr** – 1/True/true or 0/False/false, if logs=true, return - stderr log, if stream=true, attach to stderr. Default false - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --### Wait a container: -+#### [Wait a container](#id16) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/wait`{.descname} - : Block until container `id` stops, then returns -@@ -483,11 +535,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Remove a container: -+#### [Remove a container](#id17) - - `DELETE `{.descname}`/containers/`{.descname}(*id*) - : Remove the container `id` from the filesystem -@@ -504,17 +556,17 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **v** – 1/True/true or 0/False/false, Remove the volumes -+ - **v** – 1/True/true or 0/False/false, Remove the volumes - associated to the container. Default false - - Status Codes: - -- - **204** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --### Copy files or folders from a container: -+#### [Copy files or folders from a container](#id18) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/copy`{.descname} - : Copy files or folders of container `id` -@@ -537,13 +589,13 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --## Images -+### [2.2 Images](#id19) - --### List images: -+#### [List Images](#id20) - - `GET `{.descname}`/images/`{.descname}(*format*) - : List images `format` could be json or viz (json -@@ -608,16 +660,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. 
- Only running containers are shown by default - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create an image: -+#### [Create an image](#id21) - - `POST `{.descname}`/images/create`{.descname} - : Create an image, either by pull it from the registry or by importing -@@ -641,18 +693,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **fromImage** – name of the image to pull -- - **fromSrc** – source to import, - means stdin -- - **repo** – repository -- - **tag** – tag -- - **registry** – the registry to pull from -+ - **fromImage** – name of the image to pull -+ - **fromSrc** – source to import, - means stdin -+ - **repo** – repository -+ - **tag** – tag -+ - **registry** – the registry to pull from - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Insert a file in an image: -+#### [Insert a file in an image](#id22) - - `POST `{.descname}`/images/`{.descname}(*name*)`/insert`{.descname} - : Insert a file from `url` in the image -@@ -674,10 +726,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Inspect an image: -+#### [Inspect an image](#id23) - - `GET `{.descname}`/images/`{.descname}(*name*)`/json`{.descname} - : Return low-level information on the image `name` -@@ -722,12 +774,12 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **409** – conflict between containers and images -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **409** – conflict between containers and images -+ - **500** – server error - --### Get the history of an image: -+#### [Get the history of an image](#id24) - - `GET `{.descname}`/images/`{.descname}(*name*)`/history`{.descname} - : Return the history of the image `name` -@@ -756,11 +808,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Push an image on the registry: -+#### [Push an image on the registry](#id25) - - `POST `{.descname}`/images/`{.descname}(*name*)`/push`{.descname} - : Push the image `name` on the registry -@@ -782,14 +834,14 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **registry** – the registry you wan to push, optional -+ - **registry** – the registry you wan to push, optional - - Status Codes: - -- - **200** – no error :statuscode 404: no such image :statuscode -+ - **200** – no error :statuscode 404: no such image :statuscode - 500: server error - --### Tag an image into a repository: -+#### [Tag an image into a repository](#id26) - - `POST `{.descname}`/images/`{.descname}(*name*)`/tag`{.descname} - : Tag the image `name` into a repository -@@ -806,18 +858,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **repo** – The repository to tag in -- - **force** – 1/True/true or 0/False/false, default false -+ - **repo** – The repository to tag in -+ - **force** – 1/True/true or 0/False/false, default false - - Status Codes: - -- - **201** – no error -- - **400** – bad parameter -- - **404** – no such 
image -- - **409** – conflict -- - **500** – server error -+ - **201** – no error -+ - **400** – bad parameter -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Remove an image: -+#### [Remove an image](#id27) - - `DELETE `{.descname}`/images/`{.descname}(*name*) - : Remove the image `name` from the filesystem -@@ -839,12 +891,12 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Search images: -+#### [Search images](#id28) - - `GET `{.descname}`/images/search`{.descname} - : Search for an image in the docker index -@@ -877,9 +929,9 @@ page_keywords: API, Docker, rcli, REST, documentation - :statuscode 200: no error - :statuscode 500: server error - --## Misc -+### [2.3 Misc](#id29) - --### Build an image from Dockerfile via stdin: -+#### [Build an image from Dockerfile via stdin](#id30) - - `POST `{.descname}`/build`{.descname} - : Build an image from Dockerfile via stdin -@@ -908,17 +960,17 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – repository name (and optionally a tag) to be applied to -+ - **t** – repository name (and optionally a tag) to be applied to - the resulting image in case of success -- - **q** – suppress verbose build output -- - **nocache** – do not use the cache when building the image -+ - **q** – suppress verbose build output -+ - **nocache** – do not use the cache when building the image - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Check auth configuration: -+#### [Check auth configuration](#id31) - - `POST `{.descname}`/auth`{.descname} - : Get the default username and email -@@ -941,11 +993,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **204** – no error -- - **500** – server error -+ - **200** – no error -+ - **204** – no error -+ - **500** – server error - --### Display system-wide information: -+#### [Display system-wide information](#id32) - - `GET `{.descname}`/info`{.descname} - : Display system-wide information -@@ -972,10 +1024,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Show the docker version information: -+#### [Show the docker version information](#id33) - - `GET `{.descname}`/version`{.descname} - : Show the docker version information -@@ -997,10 +1049,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Create a new image from a container’s changes: -+#### [Create a new image from a container’s changes](#id34) - - `POST `{.descname}`/commit`{.descname} - : Create a new image from a container’s changes -@@ -1026,20 +1078,20 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **container** – source container -- - **repo** – repository -- - **tag** – tag -- - **m** – commit message -- - **author** – author (eg. “John Hannibal Smith -+ - **container** – source container -+ - **repo** – repository -+ - **tag** – tag -+ - **m** – commit message -+ - **author** – author (eg. 
“John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>”) - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **500** – server error - --### Monitor Docker’s events: -+#### [Monitor Docker’s events](#id35) - - `GET `{.descname}`/events`{.descname} - : Get events from docker, either in real time via streaming, or via -@@ -1063,42 +1115,42 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **since** – timestamp used for polling -+ - **since** – timestamp used for polling - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --## Going Further -+## [3. Going further](#id36) - --### Inside ‘docker run’ -+### [3.1 Inside ‘docker run’](#id37) - - Here are the steps of ‘docker run’ : - --- Create the container -+- Create the container - --- If the status code is 404, it means the image doesn’t exists: -- : - Try to pull it -- - Then retry to create the container -+- If the status code is 404, it means the image doesn’t exists: -+ : - Try to pull it -+ - Then retry to create the container - --- Start the container -+- Start the container - --- If you are not in detached mode: -- : - Attach to the container, using logs=1 (to have stdout and -+- If you are not in detached mode: -+ : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 - --- If in detached mode or only stdin is attached: -- : - Display the container’s id -+- If in detached mode or only stdin is attached: -+ : - Display the container’s id - --### Hijacking -+### [3.2 Hijacking](#id38) - - In this version of the API, /attach, uses hijacking to transport stdin, - stdout and stderr on the same socket. This might change in the future. - --### CORS Requests -+### [3.3 CORS Requests](#id39) - - To enable cross origin requests to the remote api add the flag --“-api-enable-cors” when running docker in daemon mode. -+“–api-enable-cors” when running docker in daemon mode. - -- docker -d -H="192.168.1.9:4243" -api-enable-cors -+ docker -d -H="192.168.1.9:4243" --api-enable-cors -diff --git a/docs/sources/reference/api/docker_remote_api_v1.5.md b/docs/sources/reference/api/docker_remote_api_v1.5.md -index d9c3542..ff11cd1 100644 ---- a/docs/sources/reference/api/docker_remote_api_v1.5.md -+++ b/docs/sources/reference/api/docker_remote_api_v1.5.md -@@ -2,21 +2,73 @@ page_title: Remote API v1.5 - page_description: API Documentation for Docker - page_keywords: API, Docker, rcli, REST, documentation - --# Docker Remote API v1.5 -- --## Introduction -- --- The Remote API is replacing rcli --- Default port in the docker daemon is 4243 --- The API tends to be REST, but for some complex commands, like attach -+# [Docker Remote API v1.5](#id1) -+ -+Table of Contents -+ -+- [Docker Remote API v1.5](#docker-remote-api-v1-5) -+ - [1. Brief introduction](#brief-introduction) -+ - [2. 
Endpoints](#endpoints) -+ - [2.1 Containers](#containers) -+ - [List containers](#list-containers) -+ - [Create a container](#create-a-container) -+ - [Inspect a container](#inspect-a-container) -+ - [List processes running inside a -+ container](#list-processes-running-inside-a-container) -+ - [Inspect changes on a container’s -+ filesystem](#inspect-changes-on-a-container-s-filesystem) -+ - [Export a container](#export-a-container) -+ - [Start a container](#start-a-container) -+ - [Stop a container](#stop-a-container) -+ - [Restart a container](#restart-a-container) -+ - [Kill a container](#kill-a-container) -+ - [Attach to a container](#attach-to-a-container) -+ - [Wait a container](#wait-a-container) -+ - [Remove a container](#remove-a-container) -+ - [Copy files or folders from a -+ container](#copy-files-or-folders-from-a-container) -+ - [2.2 Images](#images) -+ - [List Images](#list-images) -+ - [Create an image](#create-an-image) -+ - [Insert a file in an image](#insert-a-file-in-an-image) -+ - [Inspect an image](#inspect-an-image) -+ - [Get the history of an -+ image](#get-the-history-of-an-image) -+ - [Push an image on the -+ registry](#push-an-image-on-the-registry) -+ - [Tag an image into a -+ repository](#tag-an-image-into-a-repository) -+ - [Remove an image](#remove-an-image) -+ - [Search images](#search-images) -+ - [2.3 Misc](#misc) -+ - [Build an image from Dockerfile via -+ stdin](#build-an-image-from-dockerfile-via-stdin) -+ - [Check auth configuration](#check-auth-configuration) -+ - [Display system-wide -+ information](#display-system-wide-information) -+ - [Show the docker version -+ information](#show-the-docker-version-information) -+ - [Create a new image from a container’s -+ changes](#create-a-new-image-from-a-container-s-changes) -+ - [Monitor Docker’s events](#monitor-docker-s-events) -+ - [3. Going further](#going-further) -+ - [3.1 Inside ‘docker run’](#inside-docker-run) -+ - [3.2 Hijacking](#hijacking) -+ - [3.3 CORS Requests](#cors-requests) -+ -+## [1. Brief introduction](#id2) -+ -+- The Remote API is replacing rcli -+- Default port in the docker daemon is 4243 -+- The API tends to be REST, but for some complex commands, like attach - or pull, the HTTP connection is hijacked to transport stdout stdin - and stderr - --## Endpoints -+## [2. Endpoints](#id3) - --### Containers -+### [2.1 Containers](#id4) - --### List containers: -+#### [List containers](#id5) - - `GET `{.descname}`/containers/json`{.descname} - : List containers -@@ -77,24 +129,24 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default -- - **limit** – Show `limit` last created -+ - **limit** – Show `limit` last created - containers, include non-running ones. -- - **since** – Show only containers created since Id, include -+ - **since** – Show only containers created since Id, include - non-running ones. -- - **before** – Show only containers created before Id, include -+ - **before** – Show only containers created before Id, include - non-running ones. 
-- - **size** – 1/True/true or 0/False/false, Show the containers -+ - **size** – 1/True/true or 0/False/false, Show the containers - sizes - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create a container: -+#### [Create a container](#id6) - - `POST `{.descname}`/containers/create`{.descname} - : Create a container -@@ -142,16 +194,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **config** – the container’s configuration -+ - **config** – the container’s configuration - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **406** – impossible to attach (container not running) -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **406** – impossible to attach (container not running) -+ - **500** – server error - --### Inspect a container: -+#### [Inspect a container](#id7) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/json`{.descname} - : Return low-level information on the container `id`{.docutils -@@ -215,11 +267,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### List processes running inside a container: -+#### [List processes running inside a container](#id8) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/top`{.descname} - : List processes running inside the container `id` -@@ -257,15 +309,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **ps\_args** – ps arguments to use (eg. aux) -+ - **ps\_args** – ps arguments to use (eg. 
aux) - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Inspect changes on a container’s filesystem: -+#### [Inspect changes on a container’s filesystem](#id9) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/changes`{.descname} - : Inspect changes on container `id` ‘s filesystem -@@ -296,11 +348,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Export a container: -+#### [Export a container](#id10) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/export`{.descname} - : Export the contents of container `id` -@@ -318,11 +370,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Start a container: -+#### [Start a container](#id11) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/start`{.descname} - : Start the container `id` -@@ -346,15 +398,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **hostConfig** – the container’s host configuration (optional) -+ - **hostConfig** – the container’s host configuration (optional) - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Stop a container: -+#### [Stop a container](#id12) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/stop`{.descname} - : Stop the container `id` -@@ -371,15 +423,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Restart a container: -+#### [Restart a container](#id13) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/restart`{.descname} - : Restart the container `id` -@@ -396,15 +448,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Kill a container: -+#### [Kill a container](#id14) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/kill`{.descname} - : Kill the container `id` -@@ -419,11 +471,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Attach to a container: -+#### [Attach to a container](#id15) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/attach`{.descname} - : Attach to the container `id` -@@ -443,25 +495,25 @@ page_keywords: API, Docker, rcli, REST, documentation - -  
 - -- - **logs** – 1/True/true or 0/False/false, return logs. Default -+ - **logs** – 1/True/true or 0/False/false, return logs. Default - false -- - **stream** – 1/True/true or 0/False/false, return stream. -+ - **stream** – 1/True/true or 0/False/false, return stream. - Default false -- - **stdin** – 1/True/true or 0/False/false, if stream=true, attach -+ - **stdin** – 1/True/true or 0/False/false, if stream=true, attach - to stdin. Default false -- - **stdout** – 1/True/true or 0/False/false, if logs=true, return -+ - **stdout** – 1/True/true or 0/False/false, if logs=true, return - stdout log, if stream=true, attach to stdout. Default false -- - **stderr** – 1/True/true or 0/False/false, if logs=true, return -+ - **stderr** – 1/True/true or 0/False/false, if logs=true, return - stderr log, if stream=true, attach to stderr. Default false - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --### Wait a container: -+#### [Wait a container](#id16) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/wait`{.descname} - : Block until container `id` stops, then returns -@@ -480,11 +532,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Remove a container: -+#### [Remove a container](#id17) - - `DELETE `{.descname}`/containers/`{.descname}(*id*) - : Remove the container `id` from the filesystem -@@ -501,17 +553,17 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **v** – 1/True/true or 0/False/false, Remove the volumes -+ - **v** – 1/True/true or 0/False/false, Remove the volumes - associated to the container. Default false - - Status Codes: - -- - **204** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --### Copy files or folders from a container: -+#### [Copy files or folders from a container](#id18) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/copy`{.descname} - : Copy files or folders of container `id` -@@ -534,13 +586,13 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --## Images -+### [2.2 Images](#id19) - --### List images: -+#### [List Images](#id20) - - `GET `{.descname}`/images/`{.descname}(*format*) - : List images `format` could be json or viz (json -@@ -605,16 +657,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. 
- Only running containers are shown by default - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create an image: -+#### [Create an image](#id21) - - `POST `{.descname}`/images/create`{.descname} - : Create an image, either by pull it from the registry or by importing -@@ -642,18 +694,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **fromImage** – name of the image to pull -- - **fromSrc** – source to import, - means stdin -- - **repo** – repository -- - **tag** – tag -- - **registry** – the registry to pull from -+ - **fromImage** – name of the image to pull -+ - **fromSrc** – source to import, - means stdin -+ - **repo** – repository -+ - **tag** – tag -+ - **registry** – the registry to pull from - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Insert a file in an image: -+#### [Insert a file in an image](#id22) - - `POST `{.descname}`/images/`{.descname}(*name*)`/insert`{.descname} - : Insert a file from `url` in the image -@@ -675,10 +727,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Inspect an image: -+#### [Inspect an image](#id23) - - `GET `{.descname}`/images/`{.descname}(*name*)`/json`{.descname} - : Return low-level information on the image `name` -@@ -723,11 +775,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Get the history of an image: -+#### [Get the history of an image](#id24) - - `GET `{.descname}`/images/`{.descname}(*name*)`/history`{.descname} - : Return the history of the image `name` -@@ -756,11 +808,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Push an image on the registry: -+#### [Push an image on the registry](#id25) - - `POST `{.descname}`/images/`{.descname}(*name*)`/push`{.descname} - : Push the image `name` on the registry -@@ -786,15 +838,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **registry** – the registry you wan to push, optional -+ - **registry** – the registry you wan to push, optional - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Tag an image into a repository: -+#### [Tag an image into a repository](#id26) - - `POST `{.descname}`/images/`{.descname}(*name*)`/tag`{.descname} - : Tag the image `name` into a repository -@@ -811,18 +863,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **repo** – The repository to tag in -- - **force** – 1/True/true or 0/False/false, default false -+ - **repo** – The repository to tag in -+ - **force** – 1/True/true or 0/False/false, default false - - Status Codes: - -- - **201** – no error -- - **400** – bad parameter -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **201** – no error -+ - **400** – bad 
parameter -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Remove an image: -+#### [Remove an image](#id27) - - `DELETE `{.descname}`/images/`{.descname}(*name*) - : Remove the image `name` from the filesystem -@@ -844,12 +896,12 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Search images: -+#### [Search images](#id28) - - `GET `{.descname}`/images/search`{.descname} - : Search for an image in the docker index -@@ -882,16 +934,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **term** – term to search -+ - **term** – term to search - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --## Misc -+### [2.3 Misc](#id29) - --### Build an image from Dockerfile via stdin: -+#### [Build an image from Dockerfile via stdin](#id30) - - `POST `{.descname}`/build`{.descname} - : Build an image from Dockerfile via stdin -@@ -920,18 +972,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – repository name (and optionally a tag) to be applied to -+ - **t** – repository name (and optionally a tag) to be applied to - the resulting image in case of success -- - **q** – suppress verbose build output -- - **nocache** – do not use the cache when building the image -- - **rm** – remove intermediate containers after a successful build -+ - **q** – suppress verbose build output -+ - **nocache** – do not use the cache when building the image -+ - **rm** – remove intermediate containers after a successful build - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Check auth configuration: -+#### [Check auth configuration](#id31) - - `POST `{.descname}`/auth`{.descname} - : Get the default username and email -@@ -954,11 +1006,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **204** – no error -- - **500** – server error -+ - **200** – no error -+ - **204** – no error -+ - **500** – server error - --### Display system-wide information: -+#### [Display system-wide information](#id32) - - `GET `{.descname}`/info`{.descname} - : Display system-wide information -@@ -985,10 +1037,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Show the docker version information: -+#### [Show the docker version information](#id33) - - `GET `{.descname}`/version`{.descname} - : Show the docker version information -@@ -1010,10 +1062,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Create a new image from a container’s changes: -+#### [Create a new image from a container’s changes](#id34) - - `POST `{.descname}`/commit`{.descname} - : Create a new image from a container’s changes -@@ -1039,20 +1091,20 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **container** – source container -- - **repo** – repository -- - **tag** – tag -- - **m** – commit message -- - **author** – author (eg. 
“John Hannibal Smith -+ - **container** – source container -+ - **repo** – repository -+ - **tag** – tag -+ - **m** – commit message -+ - **author** – author (eg. “John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>”) - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **500** – server error - --### Monitor Docker’s events: -+#### [Monitor Docker’s events](#id35) - - `GET `{.descname}`/events`{.descname} - : Get events from docker, either in real time via streaming, or via -@@ -1076,37 +1128,37 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **since** – timestamp used for polling -+ - **since** – timestamp used for polling - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --## Going Further -+## [3. Going further](#id36) - --### Inside ‘docker run’ -+### [3.1 Inside ‘docker run’](#id37) - - Here are the steps of ‘docker run’ : - --- Create the container --- If the status code is 404, it means the image doesn’t exists: \* Try -+- Create the container -+- If the status code is 404, it means the image doesn’t exists: \* Try - to pull it \* Then retry to create the container --- Start the container --- If you are not in detached mode: \* Attach to the container, using -+- Start the container -+- If you are not in detached mode: \* Attach to the container, using - logs=1 (to have stdout and stderr from the container’s start) and - stream=1 --- If in detached mode or only stdin is attached: \* Display the -+- If in detached mode or only stdin is attached: \* Display the - container’s id - --### Hijacking -+### [3.2 Hijacking](#id38) - - In this version of the API, /attach, uses hijacking to transport stdin, - stdout and stderr on the same socket. This might change in the future. - --### CORS Requests -+### [3.3 CORS Requests](#id39) - - To enable cross origin requests to the remote api add the flag --“-api-enable-cors” when running docker in daemon mode. -+“–api-enable-cors” when running docker in daemon mode. - -- docker -d -H="192.168.1.9:4243" -api-enable-cors -+ docker -d -H="192.168.1.9:4243" --api-enable-cors -diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.md b/docs/sources/reference/api/docker_remote_api_v1.6.md -index 4455608..fd6a650 100644 ---- a/docs/sources/reference/api/docker_remote_api_v1.6.md -+++ b/docs/sources/reference/api/docker_remote_api_v1.6.md -@@ -2,24 +2,76 @@ page_title: Remote API v1.6 - page_description: API Documentation for Docker - page_keywords: API, Docker, rcli, REST, documentation - --# Docker Remote API v1.6 -- --## Introduction -- --- The Remote API has replaced rcli --- The daemon listens on `unix:///var/run/docker.sock`{.docutils -+# [Docker Remote API v1.6](#id1) -+ -+Table of Contents -+ -+- [Docker Remote API v1.6](#docker-remote-api-v1-6) -+ - [1. Brief introduction](#brief-introduction) -+ - [2. 
Endpoints](#endpoints) -+ - [2.1 Containers](#containers) -+ - [List containers](#list-containers) -+ - [Create a container](#create-a-container) -+ - [Inspect a container](#inspect-a-container) -+ - [List processes running inside a -+ container](#list-processes-running-inside-a-container) -+ - [Inspect changes on a container’s -+ filesystem](#inspect-changes-on-a-container-s-filesystem) -+ - [Export a container](#export-a-container) -+ - [Start a container](#start-a-container) -+ - [Stop a container](#stop-a-container) -+ - [Restart a container](#restart-a-container) -+ - [Kill a container](#kill-a-container) -+ - [Attach to a container](#attach-to-a-container) -+ - [Wait a container](#wait-a-container) -+ - [Remove a container](#remove-a-container) -+ - [Copy files or folders from a -+ container](#copy-files-or-folders-from-a-container) -+ - [2.2 Images](#images) -+ - [List Images](#list-images) -+ - [Create an image](#create-an-image) -+ - [Insert a file in an image](#insert-a-file-in-an-image) -+ - [Inspect an image](#inspect-an-image) -+ - [Get the history of an -+ image](#get-the-history-of-an-image) -+ - [Push an image on the -+ registry](#push-an-image-on-the-registry) -+ - [Tag an image into a -+ repository](#tag-an-image-into-a-repository) -+ - [Remove an image](#remove-an-image) -+ - [Search images](#search-images) -+ - [2.3 Misc](#misc) -+ - [Build an image from Dockerfile via -+ stdin](#build-an-image-from-dockerfile-via-stdin) -+ - [Check auth configuration](#check-auth-configuration) -+ - [Display system-wide -+ information](#display-system-wide-information) -+ - [Show the docker version -+ information](#show-the-docker-version-information) -+ - [Create a new image from a container’s -+ changes](#create-a-new-image-from-a-container-s-changes) -+ - [Monitor Docker’s events](#monitor-docker-s-events) -+ - [3. Going further](#going-further) -+ - [3.1 Inside ‘docker run’](#inside-docker-run) -+ - [3.2 Hijacking](#hijacking) -+ - [3.3 CORS Requests](#cors-requests) -+ -+## [1. Brief introduction](#id2) -+ -+- The Remote API has replaced rcli -+- The daemon listens on `unix:///var/run/docker.sock`{.docutils - .literal}, but you can [*Bind Docker to another host/port or a Unix - socket*](../../../use/basics/#bind-docker). --- The API tends to be REST, but for some complex commands, like -+- The API tends to be REST, but for some complex commands, like - `attach` or `pull`{.docutils .literal}, the HTTP - connection is hijacked to transport `stdout, stdin`{.docutils - .literal} and `stderr` - --## Endpoints -+## [2. Endpoints](#id3) - --### Containers -+### [2.1 Containers](#id4) - --### List containers: -+#### [List containers](#id5) - - `GET `{.descname}`/containers/json`{.descname} - : List containers -@@ -80,24 +132,24 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default -- - **limit** – Show `limit` last created -+ - **limit** – Show `limit` last created - containers, include non-running ones. -- - **since** – Show only containers created since Id, include -+ - **since** – Show only containers created since Id, include - non-running ones. -- - **before** – Show only containers created before Id, include -+ - **before** – Show only containers created before Id, include - non-running ones. 
-- - **size** – 1/True/true or 0/False/false, Show the containers -+ - **size** – 1/True/true or 0/False/false, Show the containers - sizes - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create a container: -+#### [Create a container](#id6) - - `POST `{.descname}`/containers/create`{.descname} - : Create a container -@@ -144,20 +196,20 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **config** – the container’s configuration -+ - **config** – the container’s configuration - - Query Parameters: - -   - -- - **name** – container name to use -+ - **name** – container name to use - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **406** – impossible to attach (container not running) -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **406** – impossible to attach (container not running) -+ - **500** – server error - - **More Complex Example request, in 2 steps.** **First, use create to - expose a Private Port, which can be bound back to a Public Port at -@@ -202,7 +254,7 @@ page_keywords: API, Docker, rcli, REST, documentation - - **Now you can ssh into your new container on port 11022.** - --### Inspect a container: -+#### [Inspect a container](#id7) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/json`{.descname} - : Return low-level information on the container `id`{.docutils -@@ -267,11 +319,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### List processes running inside a container: -+#### [List processes running inside a container](#id8) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/top`{.descname} - : List processes running inside the container `id` -@@ -309,15 +361,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **ps\_args** – ps arguments to use (eg. aux) -+ - **ps\_args** – ps arguments to use (eg. 
aux) - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Inspect changes on a container’s filesystem: -+#### [Inspect changes on a container’s filesystem](#id9) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/changes`{.descname} - : Inspect changes on container `id` ‘s filesystem -@@ -348,11 +400,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Export a container: -+#### [Export a container](#id10) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/export`{.descname} - : Export the contents of container `id` -@@ -370,11 +422,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Start a container: -+#### [Start a container](#id11) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/start`{.descname} - : Start the container `id` -@@ -403,15 +455,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **hostConfig** – the container’s host configuration (optional) -+ - **hostConfig** – the container’s host configuration (optional) - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Stop a container: -+#### [Stop a container](#id12) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/stop`{.descname} - : Stop the container `id` -@@ -428,15 +480,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Restart a container: -+#### [Restart a container](#id13) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/restart`{.descname} - : Restart the container `id` -@@ -453,15 +505,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Kill a container: -+#### [Kill a container](#id14) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/kill`{.descname} - : Kill the container `id` -@@ -478,17 +530,17 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **signal** – Signal to send to the container (integer). When not -+ - **signal** – Signal to send to the container (integer). When not - set, SIGKILL is assumed and the call will waits for the - container to exit. 
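
For context on the kill endpoint documented in the hunk above (`POST /containers/(id)/kill` with an optional integer `signal` query parameter, SIGKILL when unset): a minimal Go sketch of issuing that request over TCP might look like the following. The host address, the `/v1.6` path prefix, the container id, and the `killContainer` helper name are illustrative assumptions for this sketch, not part of the patch; the daemon normally listens on `unix:///var/run/docker.sock`.

```go
package main

import (
	"fmt"
	"net/http"
)

// killContainer sends POST /containers/{id}/kill to a Docker daemon
// reachable over TCP, optionally passing a signal number as a query
// parameter. A 204 response means "no error" per the documentation.
func killContainer(host, id string, signal int) error {
	url := fmt.Sprintf("http://%s/v1.6/containers/%s/kill", host, id)
	if signal > 0 {
		url += fmt.Sprintf("?signal=%d", signal)
	}
	resp, err := http.Post(url, "text/plain", nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}

func main() {
	// Hypothetical daemon address and container id, for illustration only.
	if err := killContainer("192.168.1.9:4243", "4fa6e0f0c678", 15); err != nil {
		fmt.Println("kill failed:", err)
	}
}
```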
- - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Attach to a container: -+#### [Attach to a container](#id15) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/attach`{.descname} - : Attach to the container `id` -@@ -508,23 +560,23 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **logs** – 1/True/true or 0/False/false, return logs. Default -+ - **logs** – 1/True/true or 0/False/false, return logs. Default - false -- - **stream** – 1/True/true or 0/False/false, return stream. -+ - **stream** – 1/True/true or 0/False/false, return stream. - Default false -- - **stdin** – 1/True/true or 0/False/false, if stream=true, attach -+ - **stdin** – 1/True/true or 0/False/false, if stream=true, attach - to stdin. Default false -- - **stdout** – 1/True/true or 0/False/false, if logs=true, return -+ - **stdout** – 1/True/true or 0/False/false, if logs=true, return - stdout log, if stream=true, attach to stdout. Default false -- - **stderr** – 1/True/true or 0/False/false, if logs=true, return -+ - **stderr** – 1/True/true or 0/False/false, if logs=true, return - stderr log, if stream=true, attach to stderr. Default false - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - - **Stream details**: - -@@ -549,9 +601,9 @@ page_keywords: API, Docker, rcli, REST, documentation - - `STREAM_TYPE` can be: - -- - 0: stdin (will be writen on stdout) -- - 1: stdout -- - 2: stderr -+ - 0: stdin (will be writen on stdout) -+ - 1: stdout -+ - 2: stderr - - `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of - the uint32 size encoded as big endian. -@@ -570,7 +622,7 @@ page_keywords: API, Docker, rcli, REST, documentation - 4. Read the extracted size and output it on the correct output - 5. Goto 1) - --### Wait a container: -+#### [Wait a container](#id16) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/wait`{.descname} - : Block until container `id` stops, then returns -@@ -589,11 +641,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Remove a container: -+#### [Remove a container](#id17) - - `DELETE `{.descname}`/containers/`{.descname}(*id*) - : Remove the container `id` from the filesystem -@@ -610,17 +662,17 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **v** – 1/True/true or 0/False/false, Remove the volumes -+ - **v** – 1/True/true or 0/False/false, Remove the volumes - associated to the container. 
Default false - - Status Codes: - -- - **204** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --### Copy files or folders from a container: -+#### [Copy files or folders from a container](#id18) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/copy`{.descname} - : Copy files or folders of container `id` -@@ -643,13 +695,13 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --## Images -+### [2.2 Images](#id19) - --### List images: -+#### [List Images](#id20) - - `GET `{.descname}`/images/`{.descname}(*format*) - : List images `format` could be json or viz (json -@@ -714,16 +766,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create an image: -+#### [Create an image](#id21) - - `POST `{.descname}`/images/create`{.descname} - : Create an image, either by pull it from the registry or by importing -@@ -751,18 +803,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **fromImage** – name of the image to pull -- - **fromSrc** – source to import, - means stdin -- - **repo** – repository -- - **tag** – tag -- - **registry** – the registry to pull from -+ - **fromImage** – name of the image to pull -+ - **fromSrc** – source to import, - means stdin -+ - **repo** – repository -+ - **tag** – tag -+ - **registry** – the registry to pull from - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Insert a file in an image: -+#### [Insert a file in an image](#id22) - - `POST `{.descname}`/images/`{.descname}(*name*)`/insert`{.descname} - : Insert a file from `url` in the image -@@ -784,10 +836,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Inspect an image: -+#### [Inspect an image](#id23) - - `GET `{.descname}`/images/`{.descname}(*name*)`/json`{.descname} - : Return low-level information on the image `name` -@@ -832,11 +884,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Get the history of an image: -+#### [Get the history of an image](#id24) - - `GET `{.descname}`/images/`{.descname}(*name*)`/history`{.descname} - : Return the history of the image `name` -@@ -865,11 +917,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Push an image on the registry: -+#### [Push an image on the registry](#id25) - - `POST 
`{.descname}`/images/`{.descname}(*name*)`/push`{.descname} - : Push the image `name` on the registry -@@ -893,14 +945,14 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **registry** – the registry you wan to push, optional -+ - **registry** – the registry you wan to push, optional - - Status Codes: - -- - **200** – no error :statuscode 404: no such image :statuscode -+ - **200** – no error :statuscode 404: no such image :statuscode - 500: server error - --### Tag an image into a repository: -+#### [Tag an image into a repository](#id26) - - `POST `{.descname}`/images/`{.descname}(*name*)`/tag`{.descname} - : Tag the image `name` into a repository -@@ -917,18 +969,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **repo** – The repository to tag in -- - **force** – 1/True/true or 0/False/false, default false -+ - **repo** – The repository to tag in -+ - **force** – 1/True/true or 0/False/false, default false - - Status Codes: - -- - **201** – no error -- - **400** – bad parameter -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **201** – no error -+ - **400** – bad parameter -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Remove an image: -+#### [Remove an image](#id27) - - `DELETE `{.descname}`/images/`{.descname}(*name*) - : Remove the image `name` from the filesystem -@@ -950,12 +1002,12 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Search images: -+#### [Search images](#id28) - - `GET `{.descname}`/images/search`{.descname} - : Search for an image in the docker index -@@ -988,9 +1040,9 @@ page_keywords: API, Docker, rcli, REST, documentation - :statuscode 200: no error - :statuscode 500: server error - --## Misc -+### [2.3 Misc](#id29) - --### Build an image from Dockerfile via stdin: -+#### [Build an image from Dockerfile via stdin](#id30) - - `POST `{.descname}`/build`{.descname} - : Build an image from Dockerfile via stdin -@@ -1019,17 +1071,17 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – repository name (and optionally a tag) to be applied to -+ - **t** – repository name (and optionally a tag) to be applied to - the resulting image in case of success -- - **q** – suppress verbose build output -- - **nocache** – do not use the cache when building the image -+ - **q** – suppress verbose build output -+ - **nocache** – do not use the cache when building the image - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Check auth configuration: -+#### [Check auth configuration](#id31) - - `POST `{.descname}`/auth`{.descname} - : Get the default username and email -@@ -1052,11 +1104,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **204** – no error -- - **500** – server error -+ - **200** – no error -+ - **204** – no error -+ - **500** – server error - --### Display system-wide information: -+#### [Display system-wide information](#id32) - - `GET `{.descname}`/info`{.descname} - : Display system-wide information -@@ -1083,10 +1135,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server 
error -+ - **200** – no error -+ - **500** – server error - --### Show the docker version information: -+#### [Show the docker version information](#id33) - - `GET `{.descname}`/version`{.descname} - : Show the docker version information -@@ -1108,10 +1160,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Create a new image from a container’s changes: -+#### [Create a new image from a container’s changes](#id34) - - `POST `{.descname}`/commit`{.descname} - : Create a new image from a container’s changes -@@ -1137,20 +1189,20 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **container** – source container -- - **repo** – repository -- - **tag** – tag -- - **m** – commit message -- - **author** – author (eg. “John Hannibal Smith -+ - **container** – source container -+ - **repo** – repository -+ - **tag** – tag -+ - **m** – commit message -+ - **author** – author (eg. “John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>”) - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **500** – server error - --### Monitor Docker’s events: -+#### [Monitor Docker’s events](#id35) - - `GET `{.descname}`/events`{.descname} - : Get events from docker, either in real time via streaming, or via -@@ -1174,42 +1226,42 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **since** – timestamp used for polling -+ - **since** – timestamp used for polling - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --## Going Further -+## [3. Going further](#id36) - --### Inside ‘docker run’ -+### [3.1 Inside ‘docker run’](#id37) - - Here are the steps of ‘docker run’ : - --- Create the container -+- Create the container - --- If the status code is 404, it means the image doesn’t exists: -- : - Try to pull it -- - Then retry to create the container -+- If the status code is 404, it means the image doesn’t exists: -+ : - Try to pull it -+ - Then retry to create the container - --- Start the container -+- Start the container - --- If you are not in detached mode: -- : - Attach to the container, using logs=1 (to have stdout and -+- If you are not in detached mode: -+ : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 - --- If in detached mode or only stdin is attached: -- : - Display the container’s id -+- If in detached mode or only stdin is attached: -+ : - Display the container’s id - --### Hijacking -+### [3.2 Hijacking](#id38) - - In this version of the API, /attach, uses hijacking to transport stdin, - stdout and stderr on the same socket. This might change in the future. - --### CORS Requests -+### [3.3 CORS Requests](#id39) - - To enable cross origin requests to the remote api add the flag --“-api-enable-cors” when running docker in daemon mode. -+“–api-enable-cors” when running docker in daemon mode. 
- -- docker -d -H="192.168.1.9:4243" -api-enable-cors -+ docker -d -H="192.168.1.9:4243" --api-enable-cors -diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.md b/docs/sources/reference/api/docker_remote_api_v1.7.md -index 1d1bd27..0c8c962 100644 ---- a/docs/sources/reference/api/docker_remote_api_v1.7.md -+++ b/docs/sources/reference/api/docker_remote_api_v1.7.md -@@ -2,24 +2,80 @@ page_title: Remote API v1.7 - page_description: API Documentation for Docker - page_keywords: API, Docker, rcli, REST, documentation - --# Docker Remote API v1.7 -- --## Introduction -- --- The Remote API has replaced rcli --- The daemon listens on `unix:///var/run/docker.sock`{.docutils -+# [Docker Remote API v1.7](#id1) -+ -+Table of Contents -+ -+- [Docker Remote API v1.7](#docker-remote-api-v1-7) -+ - [1. Brief introduction](#brief-introduction) -+ - [2. Endpoints](#endpoints) -+ - [2.1 Containers](#containers) -+ - [List containers](#list-containers) -+ - [Create a container](#create-a-container) -+ - [Inspect a container](#inspect-a-container) -+ - [List processes running inside a -+ container](#list-processes-running-inside-a-container) -+ - [Inspect changes on a container’s -+ filesystem](#inspect-changes-on-a-container-s-filesystem) -+ - [Export a container](#export-a-container) -+ - [Start a container](#start-a-container) -+ - [Stop a container](#stop-a-container) -+ - [Restart a container](#restart-a-container) -+ - [Kill a container](#kill-a-container) -+ - [Attach to a container](#attach-to-a-container) -+ - [Wait a container](#wait-a-container) -+ - [Remove a container](#remove-a-container) -+ - [Copy files or folders from a -+ container](#copy-files-or-folders-from-a-container) -+ - [2.2 Images](#images) -+ - [List Images](#list-images) -+ - [Create an image](#create-an-image) -+ - [Insert a file in an image](#insert-a-file-in-an-image) -+ - [Inspect an image](#inspect-an-image) -+ - [Get the history of an -+ image](#get-the-history-of-an-image) -+ - [Push an image on the -+ registry](#push-an-image-on-the-registry) -+ - [Tag an image into a -+ repository](#tag-an-image-into-a-repository) -+ - [Remove an image](#remove-an-image) -+ - [Search images](#search-images) -+ - [2.3 Misc](#misc) -+ - [Build an image from Dockerfile via -+ stdin](#build-an-image-from-dockerfile-via-stdin) -+ - [Check auth configuration](#check-auth-configuration) -+ - [Display system-wide -+ information](#display-system-wide-information) -+ - [Show the docker version -+ information](#show-the-docker-version-information) -+ - [Create a new image from a container’s -+ changes](#create-a-new-image-from-a-container-s-changes) -+ - [Monitor Docker’s events](#monitor-docker-s-events) -+ - [Get a tarball containing all images and tags in a -+ repository](#get-a-tarball-containing-all-images-and-tags-in-a-repository) -+ - [Load a tarball with a set of images and tags into -+ docker](#load-a-tarball-with-a-set-of-images-and-tags-into-docker) -+ - [3. Going further](#going-further) -+ - [3.1 Inside ‘docker run’](#inside-docker-run) -+ - [3.2 Hijacking](#hijacking) -+ - [3.3 CORS Requests](#cors-requests) -+ -+## [1. Brief introduction](#id2) -+ -+- The Remote API has replaced rcli -+- The daemon listens on `unix:///var/run/docker.sock`{.docutils - .literal}, but you can [*Bind Docker to another host/port or a Unix - socket*](../../../use/basics/#bind-docker). 
--- The API tends to be REST, but for some complex commands, like -+- The API tends to be REST, but for some complex commands, like - `attach` or `pull`{.docutils .literal}, the HTTP - connection is hijacked to transport `stdout, stdin`{.docutils - .literal} and `stderr` - --## Endpoints -+## [2. Endpoints](#id3) - --### Containers -+### [2.1 Containers](#id4) - --### List containers: -+#### [List containers](#id5) - - `GET `{.descname}`/containers/json`{.descname} - : List containers -@@ -80,24 +136,24 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default -- - **limit** – Show `limit` last created -+ - **limit** – Show `limit` last created - containers, include non-running ones. -- - **since** – Show only containers created since Id, include -+ - **since** – Show only containers created since Id, include - non-running ones. -- - **before** – Show only containers created before Id, include -+ - **before** – Show only containers created before Id, include - non-running ones. -- - **size** – 1/True/true or 0/False/false, Show the containers -+ - **size** – 1/True/true or 0/False/false, Show the containers - sizes - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create a container: -+#### [Create a container](#id6) - - `POST `{.descname}`/containers/create`{.descname} - : Create a container -@@ -149,16 +205,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **config** – the container’s configuration -+ - **config** – the container’s configuration - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **406** – impossible to attach (container not running) -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **406** – impossible to attach (container not running) -+ - **500** – server error - --### Inspect a container: -+#### [Inspect a container](#id7) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/json`{.descname} - : Return low-level information on the container `id`{.docutils -@@ -223,11 +279,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### List processes running inside a container: -+#### [List processes running inside a container](#id8) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/top`{.descname} - : List processes running inside the container `id` -@@ -265,15 +321,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **ps\_args** – ps arguments to use (eg. aux) -+ - **ps\_args** – ps arguments to use (eg. 
aux) - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Inspect changes on a container’s filesystem: -+#### [Inspect changes on a container’s filesystem](#id9) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/changes`{.descname} - : Inspect changes on container `id` ‘s filesystem -@@ -304,11 +360,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Export a container: -+#### [Export a container](#id10) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/export`{.descname} - : Export the contents of container `id` -@@ -326,11 +382,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Start a container: -+#### [Start a container](#id11) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/start`{.descname} - : Start the container `id` -@@ -360,15 +416,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **hostConfig** – the container’s host configuration (optional) -+ - **hostConfig** – the container’s host configuration (optional) - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Stop a container: -+#### [Stop a container](#id12) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/stop`{.descname} - : Stop the container `id` -@@ -385,15 +441,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Restart a container: -+#### [Restart a container](#id13) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/restart`{.descname} - : Restart the container `id` -@@ -410,15 +466,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Kill a container: -+#### [Kill a container](#id14) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/kill`{.descname} - : Kill the container `id` -@@ -433,11 +489,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Attach to a container: -+#### [Attach to a container](#id15) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/attach`{.descname} - : Attach to the container `id` -@@ -457,23 +513,23 @@ page_keywords: API, Docker, rcli, REST, documentation - -  
 - -- - **logs** – 1/True/true or 0/False/false, return logs. Default -+ - **logs** – 1/True/true or 0/False/false, return logs. Default - false -- - **stream** – 1/True/true or 0/False/false, return stream. -+ - **stream** – 1/True/true or 0/False/false, return stream. - Default false -- - **stdin** – 1/True/true or 0/False/false, if stream=true, attach -+ - **stdin** – 1/True/true or 0/False/false, if stream=true, attach - to stdin. Default false -- - **stdout** – 1/True/true or 0/False/false, if logs=true, return -+ - **stdout** – 1/True/true or 0/False/false, if logs=true, return - stdout log, if stream=true, attach to stdout. Default false -- - **stderr** – 1/True/true or 0/False/false, if logs=true, return -+ - **stderr** – 1/True/true or 0/False/false, if logs=true, return - stderr log, if stream=true, attach to stderr. Default false - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - - **Stream details**: - -@@ -498,9 +554,9 @@ page_keywords: API, Docker, rcli, REST, documentation - - `STREAM_TYPE` can be: - -- - 0: stdin (will be writen on stdout) -- - 1: stdout -- - 2: stderr -+ - 0: stdin (will be writen on stdout) -+ - 1: stdout -+ - 2: stderr - - `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of - the uint32 size encoded as big endian. -@@ -519,7 +575,7 @@ page_keywords: API, Docker, rcli, REST, documentation - 4. Read the extracted size and output it on the correct output - 5. Goto 1) - --### Wait a container: -+#### [Wait a container](#id16) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/wait`{.descname} - : Block until container `id` stops, then returns -@@ -538,11 +594,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Remove a container: -+#### [Remove a container](#id17) - - `DELETE `{.descname}`/containers/`{.descname}(*id*) - : Remove the container `id` from the filesystem -@@ -559,17 +615,17 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **v** – 1/True/true or 0/False/false, Remove the volumes -+ - **v** – 1/True/true or 0/False/false, Remove the volumes - associated to the container. 
Default false - - Status Codes: - -- - **204** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --### Copy files or folders from a container: -+#### [Copy files or folders from a container](#id18) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/copy`{.descname} - : Copy files or folders of container `id` -@@ -592,13 +648,13 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --## Images -+### [2.2 Images](#id19) - --### List images: -+#### [List Images](#id20) - - `GET `{.descname}`/images/json`{.descname} - : **Example request**: -@@ -635,7 +691,7 @@ page_keywords: API, Docker, rcli, REST, documentation - } - ] - --### Create an image: -+#### [Create an image](#id21) - - `POST `{.descname}`/images/create`{.descname} - : Create an image, either by pull it from the registry or by importing -@@ -663,24 +719,24 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **fromImage** – name of the image to pull -- - **fromSrc** – source to import, - means stdin -- - **repo** – repository -- - **tag** – tag -- - **registry** – the registry to pull from -+ - **fromImage** – name of the image to pull -+ - **fromSrc** – source to import, - means stdin -+ - **repo** – repository -+ - **tag** – tag -+ - **registry** – the registry to pull from - - Request Headers: - -   - -- - **X-Registry-Auth** – base64-encoded AuthConfig object -+ - **X-Registry-Auth** – base64-encoded AuthConfig object - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Insert a file in an image: -+#### [Insert a file in an image](#id22) - - `POST `{.descname}`/images/`{.descname}(*name*)`/insert`{.descname} - : Insert a file from `url` in the image -@@ -702,10 +758,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Inspect an image: -+#### [Inspect an image](#id23) - - `GET `{.descname}`/images/`{.descname}(*name*)`/json`{.descname} - : Return low-level information on the image `name` -@@ -750,11 +806,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Get the history of an image: -+#### [Get the history of an image](#id24) - - `GET `{.descname}`/images/`{.descname}(*name*)`/history`{.descname} - : Return the history of the image `name` -@@ -783,11 +839,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Push an image on the registry: -+#### [Push an image on the registry](#id25) - - `POST `{.descname}`/images/`{.descname}(*name*)`/push`{.descname} - : Push the image `name` on the registry -@@ -810,22 +866,22 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **registry** – the registry you wan to push, optional -+ - 
**registry** – the registry you wan to push, optional - - Request Headers: - -   - -- - **X-Registry-Auth** – include a base64-encoded AuthConfig -+ - **X-Registry-Auth** – include a base64-encoded AuthConfig - object. - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Tag an image into a repository: -+#### [Tag an image into a repository](#id26) - - `POST `{.descname}`/images/`{.descname}(*name*)`/tag`{.descname} - : Tag the image `name` into a repository -@@ -842,18 +898,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **repo** – The repository to tag in -- - **force** – 1/True/true or 0/False/false, default false -+ - **repo** – The repository to tag in -+ - **force** – 1/True/true or 0/False/false, default false - - Status Codes: - -- - **201** – no error -- - **400** – bad parameter -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **201** – no error -+ - **400** – bad parameter -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Remove an image: -+#### [Remove an image](#id27) - - `DELETE `{.descname}`/images/`{.descname}(*name*) - : Remove the image `name` from the filesystem -@@ -875,12 +931,12 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Search images: -+#### [Search images](#id28) - - `GET `{.descname}`/images/search`{.descname} - : Search for an image in the docker index. -@@ -928,16 +984,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **term** – term to search -+ - **term** – term to search - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --## Misc -+### [2.3 Misc](#id29) - --### Build an image from Dockerfile via stdin: -+#### [Build an image from Dockerfile via stdin](#id30) - - `POST `{.descname}`/build`{.descname} - : Build an image from Dockerfile via stdin -@@ -967,24 +1023,24 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – repository name (and optionally a tag) to be applied to -+ - **t** – repository name (and optionally a tag) to be applied to - the resulting image in case of success -- - **q** – suppress verbose build output -- - **nocache** – do not use the cache when building the image -+ - **q** – suppress verbose build output -+ - **nocache** – do not use the cache when building the image - - Request Headers: - -   - -- - **Content-type** – should be set to -+ - **Content-type** – should be set to - `"application/tar"`. 
- - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Check auth configuration: -+#### [Check auth configuration](#id31) - - `POST `{.descname}`/auth`{.descname} - : Get the default username and email -@@ -1007,11 +1063,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **204** – no error -- - **500** – server error -+ - **200** – no error -+ - **204** – no error -+ - **500** – server error - --### Display system-wide information: -+#### [Display system-wide information](#id32) - - `GET `{.descname}`/info`{.descname} - : Display system-wide information -@@ -1038,10 +1094,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Show the docker version information: -+#### [Show the docker version information](#id33) - - `GET `{.descname}`/version`{.descname} - : Show the docker version information -@@ -1063,10 +1119,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Create a new image from a container’s changes: -+#### [Create a new image from a container’s changes](#id34) - - `POST `{.descname}`/commit`{.descname} - : Create a new image from a container’s changes -@@ -1086,22 +1142,22 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **container** – source container -- - **repo** – repository -- - **tag** – tag -- - **m** – commit message -- - **author** – author (eg. “John Hannibal Smith -+ - **container** – source container -+ - **repo** – repository -+ - **tag** – tag -+ - **m** – commit message -+ - **author** – author (eg. “John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>”) -- - **run** – config automatically applied when the image is run. -+ - **run** – config automatically applied when the image is run. - (ex: {“Cmd”: [“cat”, “/world”], “PortSpecs”:[“22”]}) - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **500** – server error - --### Monitor Docker’s events: -+#### [Monitor Docker’s events](#id35) - - `GET `{.descname}`/events`{.descname} - : Get events from docker, either in real time via streaming, or via -@@ -1125,14 +1181,14 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **since** – timestamp used for polling -+ - **since** – timestamp used for polling - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Get a tarball containing all images and tags in a repository: -+#### [Get a tarball containing all images and tags in a repository](#id36) - - `GET `{.descname}`/images/`{.descname}(*name*)`/get`{.descname} - : Get a tarball containing all images and metadata for the repository -@@ -1153,7 +1209,7 @@ page_keywords: API, Docker, rcli, REST, documentation - :statuscode 200: no error - :statuscode 500: server error - --### Load a tarball with a set of images and tags into docker: -+#### [Load a tarball with a set of images and tags into docker](#id37) - - `POST `{.descname}`/images/load`{.descname} - : Load a set of images and tags into the docker repository. 
-@@ -1173,35 +1229,35 @@ page_keywords: API, Docker, rcli, REST, documentation - :statuscode 200: no error - :statuscode 500: server error - --## Going Further -+## [3. Going further](#id38) - --### Inside ‘docker run’ -+### [3.1 Inside ‘docker run’](#id39) - - Here are the steps of ‘docker run’ : - --- Create the container -+- Create the container - --- If the status code is 404, it means the image doesn’t exists: -- : - Try to pull it -- - Then retry to create the container -+- If the status code is 404, it means the image doesn’t exists: -+ : - Try to pull it -+ - Then retry to create the container - --- Start the container -+- Start the container - --- If you are not in detached mode: -- : - Attach to the container, using logs=1 (to have stdout and -+- If you are not in detached mode: -+ : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 - --- If in detached mode or only stdin is attached: -- : - Display the container’s id -+- If in detached mode or only stdin is attached: -+ : - Display the container’s id - --### Hijacking -+### [3.2 Hijacking](#id40) - - In this version of the API, /attach, uses hijacking to transport stdin, - stdout and stderr on the same socket. This might change in the future. - --### CORS Requests -+### [3.3 CORS Requests](#id41) - - To enable cross origin requests to the remote api add the flag --“-api-enable-cors” when running docker in daemon mode. -+“–api-enable-cors” when running docker in daemon mode. - -- docker -d -H="192.168.1.9:4243" -api-enable-cors -+ docker -d -H="192.168.1.9:4243" --api-enable-cors -diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.md b/docs/sources/reference/api/docker_remote_api_v1.8.md -index 49c8fb6..115cabc 100644 ---- a/docs/sources/reference/api/docker_remote_api_v1.8.md -+++ b/docs/sources/reference/api/docker_remote_api_v1.8.md -@@ -2,24 +2,80 @@ page_title: Remote API v1.8 - page_description: API Documentation for Docker - page_keywords: API, Docker, rcli, REST, documentation - --# Docker Remote API v1.8 -- --## Introduction -- --- The Remote API has replaced rcli --- The daemon listens on `unix:///var/run/docker.sock`{.docutils -- .literal}, but you can [*Bind Docker to another host/port or a Unix -- socket*](../../../use/basics/#bind-docker). --- The API tends to be REST, but for some complex commands, like -- `attach` or `pull`{.docutils .literal}, the HTTP -- connection is hijacked to transport `stdout, stdin`{.docutils -- .literal} and `stderr` -- --## Endpoints -- --### Containers -- --### List containers: -+# [Docker Remote API v1.8](#id1) -+ -+Table of Contents -+ -+- [Docker Remote API v1.8](#docker-remote-api-v1-8) -+ - [1. Brief introduction](#brief-introduction) -+ - [2. 
Endpoints](#endpoints) -+ - [2.1 Containers](#containers) -+ - [List containers](#list-containers) -+ - [Create a container](#create-a-container) -+ - [Inspect a container](#inspect-a-container) -+ - [List processes running inside a -+ container](#list-processes-running-inside-a-container) -+ - [Inspect changes on a container’s -+ filesystem](#inspect-changes-on-a-container-s-filesystem) -+ - [Export a container](#export-a-container) -+ - [Start a container](#start-a-container) -+ - [Stop a container](#stop-a-container) -+ - [Restart a container](#restart-a-container) -+ - [Kill a container](#kill-a-container) -+ - [Attach to a container](#attach-to-a-container) -+ - [Wait a container](#wait-a-container) -+ - [Remove a container](#remove-a-container) -+ - [Copy files or folders from a -+ container](#copy-files-or-folders-from-a-container) -+ - [2.2 Images](#images) -+ - [List Images](#list-images) -+ - [Create an image](#create-an-image) -+ - [Insert a file in an image](#insert-a-file-in-an-image) -+ - [Inspect an image](#inspect-an-image) -+ - [Get the history of an -+ image](#get-the-history-of-an-image) -+ - [Push an image on the -+ registry](#push-an-image-on-the-registry) -+ - [Tag an image into a -+ repository](#tag-an-image-into-a-repository) -+ - [Remove an image](#remove-an-image) -+ - [Search images](#search-images) -+ - [2.3 Misc](#misc) -+ - [Build an image from Dockerfile via -+ stdin](#build-an-image-from-dockerfile-via-stdin) -+ - [Check auth configuration](#check-auth-configuration) -+ - [Display system-wide -+ information](#display-system-wide-information) -+ - [Show the docker version -+ information](#show-the-docker-version-information) -+ - [Create a new image from a container’s -+ changes](#create-a-new-image-from-a-container-s-changes) -+ - [Monitor Docker’s events](#monitor-docker-s-events) -+ - [Get a tarball containing all images and tags in a -+ repository](#get-a-tarball-containing-all-images-and-tags-in-a-repository) -+ - [Load a tarball with a set of images and tags into -+ docker](#load-a-tarball-with-a-set-of-images-and-tags-into-docker) -+ - [3. Going further](#going-further) -+ - [3.1 Inside ‘docker run’](#inside-docker-run) -+ - [3.2 Hijacking](#hijacking) -+ - [3.3 CORS Requests](#cors-requests) -+ -+## [1. Brief introduction](#id2) -+ -+- The Remote API has replaced rcli -+- The daemon listens on `unix:///var/run/docker.sock`{.docutils -+ .literal}, but you can [*Bind Docker to another host/port or a Unix -+ socket*](../../../use/basics/#bind-docker). -+- The API tends to be REST, but for some complex commands, like -+ `attach` or `pull`{.docutils .literal}, the HTTP -+ connection is hijacked to transport `stdout, stdin`{.docutils -+ .literal} and `stderr` -+ -+## [2. Endpoints](#id3) -+ -+### [2.1 Containers](#id4) -+ -+#### [List containers](#id5) - - `GET `{.descname}`/containers/json`{.descname} - : List containers -@@ -80,24 +136,24 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default -- - **limit** – Show `limit` last created -+ - **limit** – Show `limit` last created - containers, include non-running ones. -- - **since** – Show only containers created since Id, include -+ - **since** – Show only containers created since Id, include - non-running ones. 
-- - **before** – Show only containers created before Id, include -+ - **before** – Show only containers created before Id, include - non-running ones. -- - **size** – 1/True/true or 0/False/false, Show the containers -+ - **size** – 1/True/true or 0/False/false, Show the containers - sizes - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create a container: -+#### [Create a container](#id6) - - `POST `{.descname}`/containers/create`{.descname} - : Create a container -@@ -150,36 +206,36 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **Hostname** – Container host name -- - **User** – Username or UID -- - **Memory** – Memory Limit in bytes -- - **CpuShares** – CPU shares (relative weight -- - **AttachStdin** – 1/True/true or 0/False/false, attach to -+ - **Hostname** – Container host name -+ - **User** – Username or UID -+ - **Memory** – Memory Limit in bytes -+ - **CpuShares** – CPU shares (relative weight) -+ - **AttachStdin** – 1/True/true or 0/False/false, attach to - standard input. Default false -- - **AttachStdout** – 1/True/true or 0/False/false, attach to -+ - **AttachStdout** – 1/True/true or 0/False/false, attach to - standard output. Default false -- - **AttachStderr** – 1/True/true or 0/False/false, attach to -+ - **AttachStderr** – 1/True/true or 0/False/false, attach to - standard error. Default false -- - **Tty** – 1/True/true or 0/False/false, allocate a pseudo-tty. -+ - **Tty** – 1/True/true or 0/False/false, allocate a pseudo-tty. - Default false -- - **OpenStdin** – 1/True/true or 0/False/false, keep stdin open -+ - **OpenStdin** – 1/True/true or 0/False/false, keep stdin open - even if not attached. Default false - - Query Parameters: - -   - -- - **name** – Assign the specified name to the container. Must -+ - **name** – Assign the specified name to the container. Must - match `/?[a-zA-Z0-9_-]+`. - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **406** – impossible to attach (container not running -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **406** – impossible to attach (container not running) -+ - **500** – server error - --### Inspect a container: -+#### [Inspect a container](#id7) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/json`{.descname} - : Return low-level information on the container `id`{.docutils -@@ -260,11 +316,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### List processes running inside a container: -+#### [List processes running inside a container](#id8) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/top`{.descname} - : List processes running inside the container `id` -@@ -302,15 +358,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **ps\_args** – ps arguments to use (eg. aux -+ - **ps\_args** – ps arguments to use (eg. 
aux) - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Inspect changes on a container’s filesystem: -+#### [Inspect changes on a container’s filesystem](#id9) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/changes`{.descname} - : Inspect changes on container `id` ‘s filesystem -@@ -341,11 +397,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Export a container: -+#### [Export a container](#id10) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/export`{.descname} - : Export the contents of container `id` -@@ -363,11 +419,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Start a container: -+#### [Start a container](#id11) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/start`{.descname} - : Start the container `id` -@@ -394,24 +450,24 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **Binds** – Create a bind mount to a directory or file with -+ - **Binds** – Create a bind mount to a directory or file with - [host-path]:[container-path]:[rw|ro]. If a directory - “container-path” is missing, then docker creates a new volume. -- - **LxcConf** – Map of custom lxc options -- - **PortBindings** – Expose ports from the container, optionally -+ - **LxcConf** – Map of custom lxc options -+ - **PortBindings** – Expose ports from the container, optionally - publishing them via the HostPort flag -- - **PublishAllPorts** – 1/True/true or 0/False/false, publish all -+ - **PublishAllPorts** – 1/True/true or 0/False/false, publish all - exposed ports to the host interfaces. Default false -- - **Privileged** – 1/True/true or 0/False/false, give extended -+ - **Privileged** – 1/True/true or 0/False/false, give extended - privileges to this container. 
Default false - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Stop a container: -+#### [Stop a container](#id12) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/stop`{.descname} - : Stop the container `id` -@@ -428,15 +484,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Restart a container: -+#### [Restart a container](#id13) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/restart`{.descname} - : Restart the container `id` -@@ -453,15 +509,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Kill a container: -+#### [Kill a container](#id14) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/kill`{.descname} - : Kill the container `id` -@@ -476,11 +532,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - --### Attach to a container: -+#### [Attach to a container](#id15) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/attach`{.descname} - : Attach to the container `id` -@@ -500,23 +556,23 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **logs** – 1/True/true or 0/False/false, return logs. Default -+ - **logs** – 1/True/true or 0/False/false, return logs. Default - false -- - **stream** – 1/True/true or 0/False/false, return stream. -+ - **stream** – 1/True/true or 0/False/false, return stream. - Default false -- - **stdin** – 1/True/true or 0/False/false, if stream=true, attach -+ - **stdin** – 1/True/true or 0/False/false, if stream=true, attach - to stdin. Default false -- - **stdout** – 1/True/true or 0/False/false, if logs=true, return -+ - **stdout** – 1/True/true or 0/False/false, if logs=true, return - stdout log, if stream=true, attach to stdout. Default false -- - **stderr** – 1/True/true or 0/False/false, if logs=true, return -+ - **stderr** – 1/True/true or 0/False/false, if logs=true, return - stderr log, if stream=true, attach to stderr. Default false - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - - **Stream details**: - -@@ -541,9 +597,9 @@ page_keywords: API, Docker, rcli, REST, documentation - - `STREAM_TYPE` can be: - -- - 0: stdin (will be writen on stdout -- - 1: stdout -- - 2: stderr -+ - 0: stdin (will be writen on stdout) -+ - 1: stdout -+ - 2: stderr - - `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of - the uint32 size encoded as big endian. 
-@@ -560,9 +616,9 @@ page_keywords: API, Docker, rcli, REST, documentation - 2. chose stdout or stderr depending on the first byte - 3. Extract the frame size from the last 4 byets - 4. Read the extracted size and output it on the correct output -- 5. Goto 1 -+ 5. Goto 1) - --### Wait a container: -+#### [Wait a container](#id16) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/wait`{.descname} - : Block until container `id` stops, then returns -@@ -581,13 +637,13 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Remove a container: -+#### [Remove a container](#id17) - -- `DELETE `{.descname}`/containers/`{.descname}(*id* -+ `DELETE `{.descname}`/containers/`{.descname}(*id*) - : Remove the container `id` from the filesystem - - **Example request**: -@@ -602,17 +658,17 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **v** – 1/True/true or 0/False/false, Remove the volumes -+ - **v** – 1/True/true or 0/False/false, Remove the volumes - associated to the container. Default false - - Status Codes: - -- - **204** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --### Copy files or folders from a container: -+#### [Copy files or folders from a container](#id18) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/copy`{.descname} - : Copy files or folders of container `id` -@@ -635,13 +691,13 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Images -+### [2.2 Images](#id19) - --### List Images: -+#### [List Images](#id20) - - `GET `{.descname}`/images/json`{.descname} - : **Example request**: -@@ -678,7 +734,7 @@ page_keywords: API, Docker, rcli, REST, documentation - } - ] - --### Create an image: -+#### [Create an image](#id21) - - `POST `{.descname}`/images/create`{.descname} - : Create an image, either by pull it from the registry or by importing -@@ -706,24 +762,24 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **fromImage** – name of the image to pull -- - **fromSrc** – source to import, - means stdin -- - **repo** – repository -- - **tag** – tag -- - **registry** – the registry to pull from -+ - **fromImage** – name of the image to pull -+ - **fromSrc** – source to import, - means stdin -+ - **repo** – repository -+ - **tag** – tag -+ - **registry** – the registry to pull from - - Request Headers: - -   - -- - **X-Registry-Auth** – base64-encoded AuthConfig object -+ - **X-Registry-Auth** – base64-encoded AuthConfig object - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Insert a file in an image: -+#### [Insert a file in an image](#id22) - - `POST `{.descname}`/images/`{.descname}(*name*)`/insert`{.descname} - : Insert a file from `url` in the image -@@ -745,10 +801,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Inspect an image: -+#### 
[Inspect an image](#id23) - - `GET `{.descname}`/images/`{.descname}(*name*)`/json`{.descname} - : Return low-level information on the image `name` -@@ -793,11 +849,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Get the history of an image: -+#### [Get the history of an image](#id24) - - `GET `{.descname}`/images/`{.descname}(*name*)`/history`{.descname} - : Return the history of the image `name` -@@ -826,11 +882,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Push an image on the registry: -+#### [Push an image on the registry](#id25) - - `POST `{.descname}`/images/`{.descname}(*name*)`/push`{.descname} - : Push the image `name` on the registry -@@ -853,22 +909,22 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **registry** – the registry you wan to push, optional -+ - **registry** – the registry you wan to push, optional - - Request Headers: - -   - -- - **X-Registry-Auth** – include a base64-encoded AuthConfig -+ - **X-Registry-Auth** – include a base64-encoded AuthConfig - object. - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Tag an image into a repository: -+#### [Tag an image into a repository](#id26) - - `POST `{.descname}`/images/`{.descname}(*name*)`/tag`{.descname} - : Tag the image `name` into a repository -@@ -885,20 +941,20 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **repo** – The repository to tag in -- - **force** – 1/True/true or 0/False/false, default false -+ - **repo** – The repository to tag in -+ - **force** – 1/True/true or 0/False/false, default false - - Status Codes: - -- - **201** – no error -- - **400** – bad parameter -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **201** – no error -+ - **400** – bad parameter -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Remove an image: -+#### [Remove an image](#id27) - -- `DELETE `{.descname}`/images/`{.descname}(*name* -+ `DELETE `{.descname}`/images/`{.descname}(*name*) - : Remove the image `name` from the filesystem - - **Example request**: -@@ -918,12 +974,12 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Search images: -+#### [Search images](#id28) - - `GET `{.descname}`/images/search`{.descname} - : Search for an image in the docker index. 
-@@ -971,16 +1027,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **term** – term to search -+ - **term** – term to search - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Misc -+### [2.3 Misc](#id29) - --### Build an image from Dockerfile via stdin: -+#### [Build an image from Dockerfile via stdin](#id30) - - `POST `{.descname}`/build`{.descname} - : Build an image from Dockerfile via stdin -@@ -1012,25 +1068,25 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – repository name (and optionally a tag) to be applied to -+ - **t** – repository name (and optionally a tag) to be applied to - the resulting image in case of success -- - **q** – suppress verbose build output -- - **nocache** – do not use the cache when building the image -+ - **q** – suppress verbose build output -+ - **nocache** – do not use the cache when building the image - - Request Headers: - -   - -- - **Content-type** – should be set to -+ - **Content-type** – should be set to - `"application/tar"`. -- - **X-Registry-Auth** – base64-encoded AuthConfig object -+ - **X-Registry-Auth** – base64-encoded AuthConfig object - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Check auth configuration: -+#### [Check auth configuration](#id31) - - `POST `{.descname}`/auth`{.descname} - : Get the default username and email -@@ -1053,11 +1109,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **204** – no error -- - **500** – server error -+ - **200** – no error -+ - **204** – no error -+ - **500** – server error - --### Display system-wide information: -+#### [Display system-wide information](#id32) - - `GET `{.descname}`/info`{.descname} - : Display system-wide information -@@ -1084,10 +1140,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Show the docker version information: -+#### [Show the docker version information](#id33) - - `GET `{.descname}`/version`{.descname} - : Show the docker version information -@@ -1109,10 +1165,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Create a new image from a container’s changes: -+#### [Create a new image from a container’s changes](#id34) - - `POST `{.descname}`/commit`{.descname} - : Create a new image from a container’s changes -@@ -1132,26 +1188,26 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **container** – source container -- - **repo** – repository -- - **tag** – tag -- - **m** – commit message -- - **author** – author (eg. “John Hannibal Smith -- \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>” -- - **run** – config automatically applied when the image is run. -- (ex: {“Cmd”: [“cat”, “/world”], “PortSpecs”:[“22”]} -+ - **container** – source container -+ - **repo** – repository -+ - **tag** – tag -+ - **m** – commit message -+ - **author** – author (eg. “John Hannibal Smith -+ \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>”) -+ - **run** – config automatically applied when the image is run. 
-+ (ex: {“Cmd”: [“cat”, “/world”], “PortSpecs”:[“22”]}) - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **500** – server error - --### Monitor Docker’s events: -+#### [Monitor Docker’s events](#id35) - - `GET `{.descname}`/events`{.descname} - : Get events from docker, either in real time via streaming, or via -- polling (using since -+ polling (using since) - - **Example request**: - -@@ -1171,14 +1227,14 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **since** – timestamp used for polling -+ - **since** – timestamp used for polling - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Get a tarball containing all images and tags in a repository: -+#### [Get a tarball containing all images and tags in a repository](#id36) - - `GET `{.descname}`/images/`{.descname}(*name*)`/get`{.descname} - : Get a tarball containing all images and metadata for the repository -@@ -1197,10 +1253,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Load a tarball with a set of images and tags into docker: -+#### [Load a tarball with a set of images and tags into docker](#id37) - - `POST `{.descname}`/images/load`{.descname} - : Load a set of images and tags into the docker repository. -@@ -1217,38 +1273,38 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --## Going Further -+## [3. Going further](#id38) - --### Inside ‘docker run’ -+### [3.1 Inside ‘docker run’](#id39) - - Here are the steps of ‘docker run’ : - --- Create the container -+- Create the container - --- If the status code is 404, it means the image doesn’t exists: -- : - Try to pull it -- - Then retry to create the container -+- If the status code is 404, it means the image doesn’t exists: -+ : - Try to pull it -+ - Then retry to create the container - --- Start the container -+- Start the container - --- If you are not in detached mode: -- : - Attach to the container, using logs=1 (to have stdout and -+- If you are not in detached mode: -+ : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 - --- If in detached mode or only stdin is attached: -- : - Display the container’s id -+- If in detached mode or only stdin is attached: -+ : - Display the container’s id - --### Hijacking -+### [3.2 Hijacking](#id40) - - In this version of the API, /attach, uses hijacking to transport stdin, - stdout and stderr on the same socket. This might change in the future. - --### CORS Requests -+### [3.3 CORS Requests](#id41) - - To enable cross origin requests to the remote api add the flag --“-api-enable-cors” when running docker in daemon mode. -+“–api-enable-cors” when running docker in daemon mode. 
- -- docker -d -H="192.168.1.9:4243" -api-enable-cors -+ docker -d -H="192.168.1.9:4243" --api-enable-cors -diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md -index 658835c..c25f837 100644 ---- a/docs/sources/reference/api/docker_remote_api_v1.9.md -+++ b/docs/sources/reference/api/docker_remote_api_v1.9.md -@@ -2,24 +2,80 @@ page_title: Remote API v1.9 - page_description: API Documentation for Docker - page_keywords: API, Docker, rcli, REST, documentation - --# Docker Remote API v1.9 -- --## Introduction -- --- The Remote API has replaced rcli --- The daemon listens on `unix:///var/run/docker.sock`{.docutils -- .literal}, but you can [*Bind Docker to another host/port or a Unix -- socket*](../../../use/basics/#bind-docker). --- The API tends to be REST, but for some complex commands, like -- `attach` or `pull`{.docutils .literal}, the HTTP -- connection is hijacked to transport `stdout, stdin`{.docutils -- .literal} and `stderr` -- --## Endpoints -- --## Containers -- --### List containers: -+# [Docker Remote API v1.9](#id1) -+ -+Table of Contents -+ -+- [Docker Remote API v1.9](#docker-remote-api-v1-9) -+ - [1. Brief introduction](#brief-introduction) -+ - [2. Endpoints](#endpoints) -+ - [2.1 Containers](#containers) -+ - [List containers](#list-containers) -+ - [Create a container](#create-a-container) -+ - [Inspect a container](#inspect-a-container) -+ - [List processes running inside a -+ container](#list-processes-running-inside-a-container) -+ - [Inspect changes on a container’s -+ filesystem](#inspect-changes-on-a-container-s-filesystem) -+ - [Export a container](#export-a-container) -+ - [Start a container](#start-a-container) -+ - [Stop a container](#stop-a-container) -+ - [Restart a container](#restart-a-container) -+ - [Kill a container](#kill-a-container) -+ - [Attach to a container](#attach-to-a-container) -+ - [Wait a container](#wait-a-container) -+ - [Remove a container](#remove-a-container) -+ - [Copy files or folders from a -+ container](#copy-files-or-folders-from-a-container) -+ - [2.2 Images](#images) -+ - [List Images](#list-images) -+ - [Create an image](#create-an-image) -+ - [Insert a file in an image](#insert-a-file-in-an-image) -+ - [Inspect an image](#inspect-an-image) -+ - [Get the history of an -+ image](#get-the-history-of-an-image) -+ - [Push an image on the -+ registry](#push-an-image-on-the-registry) -+ - [Tag an image into a -+ repository](#tag-an-image-into-a-repository) -+ - [Remove an image](#remove-an-image) -+ - [Search images](#search-images) -+ - [2.3 Misc](#misc) -+ - [Build an image from -+ Dockerfile](#build-an-image-from-dockerfile) -+ - [Check auth configuration](#check-auth-configuration) -+ - [Display system-wide -+ information](#display-system-wide-information) -+ - [Show the docker version -+ information](#show-the-docker-version-information) -+ - [Create a new image from a container’s -+ changes](#create-a-new-image-from-a-container-s-changes) -+ - [Monitor Docker’s events](#monitor-docker-s-events) -+ - [Get a tarball containing all images and tags in a -+ repository](#get-a-tarball-containing-all-images-and-tags-in-a-repository) -+ - [Load a tarball with a set of images and tags into -+ docker](#load-a-tarball-with-a-set-of-images-and-tags-into-docker) -+ - [3. Going further](#going-further) -+ - [3.1 Inside ‘docker run’](#inside-docker-run) -+ - [3.2 Hijacking](#hijacking) -+ - [3.3 CORS Requests](#cors-requests) -+ -+## [1. 
Brief introduction](#id2) -+ -+- The Remote API has replaced rcli -+- The daemon listens on `unix:///var/run/docker.sock`{.docutils -+ .literal}, but you can [*Bind Docker to another host/port or a Unix -+ socket*](../../../use/basics/#bind-docker). -+- The API tends to be REST, but for some complex commands, like -+ `attach` or `pull`{.docutils .literal}, the HTTP -+ connection is hijacked to transport `stdout, stdin`{.docutils -+ .literal} and `stderr` -+ -+## [2. Endpoints](#id3) -+ -+### [2.1 Containers](#id4) -+ -+#### [List containers](#id5) - - `GET `{.descname}`/containers/json`{.descname} - : List containers -@@ -80,24 +136,24 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **all** – 1/True/true or 0/False/false, Show all containers. -+ - **all** – 1/True/true or 0/False/false, Show all containers. - Only running containers are shown by default -- - **limit** – Show `limit` last created -+ - **limit** – Show `limit` last created - containers, include non-running ones. -- - **since** – Show only containers created since Id, include -+ - **since** – Show only containers created since Id, include - non-running ones. -- - **before** – Show only containers created before Id, include -+ - **before** – Show only containers created before Id, include - non-running ones. -- - **size** – 1/True/true or 0/False/false, Show the containers -+ - **size** – 1/True/true or 0/False/false, Show the containers - sizes - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **500** – server error - --### Create a container: -+#### [Create a container](#id6) - - `POST `{.descname}`/containers/create`{.descname} - : Create a container -@@ -150,36 +206,36 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **Hostname** – Container host name -- - **User** – Username or UID -- - **Memory** – Memory Limit in bytes -- - **CpuShares** – CPU shares (relative weight) -- - **AttachStdin** – 1/True/true or 0/False/false, attach to -+ - **Hostname** – Container host name -+ - **User** – Username or UID -+ - **Memory** – Memory Limit in bytes -+ - **CpuShares** – CPU shares (relative weight) -+ - **AttachStdin** – 1/True/true or 0/False/false, attach to - standard input. Default false -- - **AttachStdout** – 1/True/true or 0/False/false, attach to -+ - **AttachStdout** – 1/True/true or 0/False/false, attach to - standard output. Default false -- - **AttachStderr** – 1/True/true or 0/False/false, attach to -+ - **AttachStderr** – 1/True/true or 0/False/false, attach to - standard error. Default false -- - **Tty** – 1/True/true or 0/False/false, allocate a pseudo-tty. -+ - **Tty** – 1/True/true or 0/False/false, allocate a pseudo-tty. - Default false -- - **OpenStdin** – 1/True/true or 0/False/false, keep stdin open -+ - **OpenStdin** – 1/True/true or 0/False/false, keep stdin open - even if not attached. Default false - - Query Parameters: - -   - -- - **name** – Assign the specified name to the container. Must -+ - **name** – Assign the specified name to the container. Must - match `/?[a-zA-Z0-9_-]+`. 
- - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **406** – impossible to attach (container not running) -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **406** – impossible to attach (container not running) -+ - **500** – server error - --### Inspect a container: -+#### [Inspect a container](#id7) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/json`{.descname} - : Return low-level information on the container `id`{.docutils -@@ -260,11 +316,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### List processes running inside a container: -+#### [List processes running inside a container](#id8) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/top`{.descname} - : List processes running inside the container `id` -@@ -302,15 +358,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **ps\_args** – ps arguments to use (eg. aux) -+ - **ps\_args** – ps arguments to use (eg. aux) - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Inspect changes on a container’s filesystem: -+#### [Inspect changes on a container’s filesystem](#id9) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/changes`{.descname} - : Inspect changes on container `id` ‘s filesystem -@@ -341,12 +397,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -- --### Export a container: -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - -+#### [Export a container](#id10) - - `GET `{.descname}`/containers/`{.descname}(*id*)`/export`{.descname} - : Export the contents of container `id` -@@ -364,12 +419,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -- --### Start a container: -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - -+#### [Start a container](#id11) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/start`{.descname} - : Start the container `id` -@@ -396,25 +450,24 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **Binds** – Create a bind mount to a directory or file with -+ - **Binds** – Create a bind mount to a directory or file with - [host-path]:[container-path]:[rw|ro]. If a directory - “container-path” is missing, then docker creates a new volume. -- - **LxcConf** – Map of custom lxc options -- - **PortBindings** – Expose ports from the container, optionally -+ - **LxcConf** – Map of custom lxc options -+ - **PortBindings** – Expose ports from the container, optionally - publishing them via the HostPort flag -- - **PublishAllPorts** – 1/True/true or 0/False/false, publish all -+ - **PublishAllPorts** – 1/True/true or 0/False/false, publish all - exposed ports to the host interfaces. Default false -- - **Privileged** – 1/True/true or 0/False/false, give extended -+ - **Privileged** – 1/True/true or 0/False/false, give extended - privileges to this container. 
Default false - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -- --### Stop a container: -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - -+#### [Stop a container](#id12) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/stop`{.descname} - : Stop the container `id` -@@ -431,16 +484,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -- --### Restart a container: -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - -+#### [Restart a container](#id13) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/restart`{.descname} - : Restart the container `id` -@@ -457,16 +509,15 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – number of seconds to wait before killing the container -+ - **t** – number of seconds to wait before killing the container - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -- --### Kill a container: -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - -+#### [Kill a container](#id14) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/kill`{.descname} - : Kill the container `id` -@@ -481,12 +532,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **204** – no error -- - **404** – no such container -- - **500** – server error -- --### Attach to a container: -+ - **204** – no error -+ - **404** – no such container -+ - **500** – server error - -+#### [Attach to a container](#id15) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/attach`{.descname} - : Attach to the container `id` -@@ -506,23 +556,23 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **logs** – 1/True/true or 0/False/false, return logs. Default -+ - **logs** – 1/True/true or 0/False/false, return logs. Default - false -- - **stream** – 1/True/true or 0/False/false, return stream. -+ - **stream** – 1/True/true or 0/False/false, return stream. - Default false -- - **stdin** – 1/True/true or 0/False/false, if stream=true, attach -+ - **stdin** – 1/True/true or 0/False/false, if stream=true, attach - to stdin. Default false -- - **stdout** – 1/True/true or 0/False/false, if logs=true, return -+ - **stdout** – 1/True/true or 0/False/false, if logs=true, return - stdout log, if stream=true, attach to stdout. Default false -- - **stderr** – 1/True/true or 0/False/false, if logs=true, return -+ - **stderr** – 1/True/true or 0/False/false, if logs=true, return - stderr log, if stream=true, attach to stderr. Default false - - Status Codes: - -- - **200** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - - **Stream details**: - -@@ -547,9 +597,9 @@ page_keywords: API, Docker, rcli, REST, documentation - - `STREAM_TYPE` can be: - -- - 0: stdin (will be writen on stdout) -- - 1: stdout -- - 2: stderr -+ - 0: stdin (will be writen on stdout) -+ - 1: stdout -+ - 2: stderr - - `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of - the uint32 size encoded as big endian. 
-@@ -568,7 +618,7 @@ page_keywords: API, Docker, rcli, REST, documentation - 4. Read the extracted size and output it on the correct output - 5. Goto 1) - --### Wait a container: -+#### [Wait a container](#id16) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/wait`{.descname} - : Block until container `id` stops, then returns -@@ -587,11 +637,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --### Remove a container: -+#### [Remove a container](#id17) - - `DELETE `{.descname}`/containers/`{.descname}(*id*) - : Remove the container `id` from the filesystem -@@ -608,17 +658,17 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **v** – 1/True/true or 0/False/false, Remove the volumes -+ - **v** – 1/True/true or 0/False/false, Remove the volumes - associated to the container. Default false - - Status Codes: - -- - **204** – no error -- - **400** – bad parameter -- - **404** – no such container -- - **500** – server error -+ - **204** – no error -+ - **400** – bad parameter -+ - **404** – no such container -+ - **500** – server error - --### Copy files or folders from a container: -+#### [Copy files or folders from a container](#id18) - - `POST `{.descname}`/containers/`{.descname}(*id*)`/copy`{.descname} - : Copy files or folders of container `id` -@@ -641,13 +691,13 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such container -- - **500** – server error -+ - **200** – no error -+ - **404** – no such container -+ - **500** – server error - --## Images -+### [2.2 Images](#id19) - --### List Images: -+#### [List Images](#id20) - - `GET `{.descname}`/images/json`{.descname} - : **Example request**: -@@ -684,7 +734,7 @@ page_keywords: API, Docker, rcli, REST, documentation - } - ] - --### Create an image: -+#### [Create an image](#id21) - - `POST `{.descname}`/images/create`{.descname} - : Create an image, either by pull it from the registry or by importing -@@ -712,25 +762,24 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **fromImage** – name of the image to pull -- - **fromSrc** – source to import, - means stdin -- - **repo** – repository -- - **tag** – tag -- - **registry** – the registry to pull from -+ - **fromImage** – name of the image to pull -+ - **fromSrc** – source to import, - means stdin -+ - **repo** – repository -+ - **tag** – tag -+ - **registry** – the registry to pull from - - Request Headers: - -   - -- - **X-Registry-Auth** – base64-encoded AuthConfig object -+ - **X-Registry-Auth** – base64-encoded AuthConfig object - - Status Codes: - -- - **200** – no error -- - **500** – server error -- --### Insert a file in an image: -+ - **200** – no error -+ - **500** – server error - -+#### [Insert a file in an image](#id22) - - `POST `{.descname}`/images/`{.descname}(*name*)`/insert`{.descname} - : Insert a file from `url` in the image -@@ -752,10 +801,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Inspect an image: -+#### [Inspect an image](#id23) - - `GET `{.descname}`/images/`{.descname}(*name*)`/json`{.descname} - : Return low-level information on the image `name` -@@ -800,11 +849,11 @@ page_keywords: API, Docker, rcli, 
REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Get the history of an image: -+#### [Get the history of an image](#id24) - - `GET `{.descname}`/images/`{.descname}(*name*)`/history`{.descname} - : Return the history of the image `name` -@@ -833,12 +882,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -- --### Push an image on the registry: -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - -+#### [Push an image on the registry](#id25) - - `POST `{.descname}`/images/`{.descname}(*name*)`/push`{.descname} - : Push the image `name` on the registry -@@ -861,22 +909,22 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **registry** – the registry you wan to push, optional -+ - **registry** – the registry you wan to push, optional - - Request Headers: - -   - -- - **X-Registry-Auth** – include a base64-encoded AuthConfig -+ - **X-Registry-Auth** – include a base64-encoded AuthConfig - object. - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **500** – server error - --### Tag an image into a repository: -+#### [Tag an image into a repository](#id26) - - `POST `{.descname}`/images/`{.descname}(*name*)`/tag`{.descname} - : Tag the image `name` into a repository -@@ -893,18 +941,18 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **repo** – The repository to tag in -- - **force** – 1/True/true or 0/False/false, default false -+ - **repo** – The repository to tag in -+ - **force** – 1/True/true or 0/False/false, default false - - Status Codes: - -- - **201** – no error -- - **400** – bad parameter -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **201** – no error -+ - **400** – bad parameter -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Remove an image: -+#### [Remove an image](#id27) - - `DELETE `{.descname}`/images/`{.descname}(*name*) - : Remove the image `name` from the filesystem -@@ -926,12 +974,12 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **404** – no such image -- - **409** – conflict -- - **500** – server error -+ - **200** – no error -+ - **404** – no such image -+ - **409** – conflict -+ - **500** – server error - --### Search images: -+#### [Search images](#id28) - - `GET `{.descname}`/images/search`{.descname} - : Search for an image in the docker index. -@@ -979,16 +1027,16 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **term** – term to search -+ - **term** – term to search - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --## Misc -+### [2.3 Misc](#id29) - --### Build an image from Dockerfile: -+#### [Build an image from Dockerfile](#id30) - - `POST `{.descname}`/build`{.descname} - : Build an image from Dockerfile using a POST body. 
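The query parameters and request headers accepted by this endpoint are listed just below. As a rough illustration of the call itself, the Go sketch that follows streams a pre-built tar archive to `/build`; the daemon address, the archive name `context.tar`, and the tag `myrepo/myimage` are placeholders chosen for the example.

    package main

    import (
        "fmt"
        "io"
        "log"
        "net/http"
        "os"
    )

    func main() {
        // Placeholders for the sketch: a pre-built tar archive holding the
        // Dockerfile, a daemon reachable over TCP, and a target repository tag.
        archive, err := os.Open("context.tar")
        if err != nil {
            log.Fatal(err)
        }
        defer archive.Close()

        // POST the archive to /build; `t` names the resulting image and the
        // body is sent as application/tar, per the parameters listed below.
        req, err := http.NewRequest("POST", "http://localhost:4243/build?t=myrepo/myimage", archive)
        if err != nil {
            log.Fatal(err)
        }
        req.Header.Set("Content-Type", "application/tar")

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        // The build output streams back in the response body.
        fmt.Println("status:", resp.Status)
        io.Copy(os.Stdout, resp.Body)
    }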
-@@ -1020,26 +1068,26 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **t** – repository name (and optionally a tag) to be applied to -+ - **t** – repository name (and optionally a tag) to be applied to - the resulting image in case of success -- - **q** – suppress verbose build output -- - **nocache** – do not use the cache when building the image -- - **rm** – Remove intermediate containers after a successful build -+ - **q** – suppress verbose build output -+ - **nocache** – do not use the cache when building the image -+ - **rm** – Remove intermediate containers after a successful build - - Request Headers: - -   - -- - **Content-type** – should be set to -+ - **Content-type** – should be set to - `"application/tar"`. -- - **X-Registry-Config** – base64-encoded ConfigFile object -+ - **X-Registry-Config** – base64-encoded ConfigFile object - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Check auth configuration: -+#### [Check auth configuration](#id31) - - `POST `{.descname}`/auth`{.descname} - : Get the default username and email -@@ -1062,11 +1110,11 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **204** – no error -- - **500** – server error -+ - **200** – no error -+ - **204** – no error -+ - **500** – server error - --### Display system-wide information: -+#### [Display system-wide information](#id32) - - `GET `{.descname}`/info`{.descname} - : Display system-wide information -@@ -1093,10 +1141,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Show the docker version information: -+#### [Show the docker version information](#id33) - - `GET `{.descname}`/version`{.descname} - : Show the docker version information -@@ -1118,10 +1166,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Create a new image from a container’s changes: -+#### [Create a new image from a container’s changes](#id34) - - `POST `{.descname}`/commit`{.descname} - : Create a new image from a container’s changes -@@ -1141,22 +1189,22 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **container** – source container -- - **repo** – repository -- - **tag** – tag -- - **m** – commit message -- - **author** – author (eg. “John Hannibal Smith -+ - **container** – source container -+ - **repo** – repository -+ - **tag** – tag -+ - **m** – commit message -+ - **author** – author (eg. “John Hannibal Smith - \<[hannibal@a-team.com](mailto:hannibal%40a-team.com)\>”) -- - **run** – config automatically applied when the image is run. -+ - **run** – config automatically applied when the image is run. 
- (ex: {“Cmd”: [“cat”, “/world”], “PortSpecs”:[“22”]}) - - Status Codes: - -- - **201** – no error -- - **404** – no such container -- - **500** – server error -+ - **201** – no error -+ - **404** – no such container -+ - **500** – server error - --### Monitor Docker’s events: -+#### [Monitor Docker’s events](#id35) - - `GET `{.descname}`/events`{.descname} - : Get events from docker, either in real time via streaming, or via -@@ -1180,14 +1228,14 @@ page_keywords: API, Docker, rcli, REST, documentation - -   - -- - **since** – timestamp used for polling -+ - **since** – timestamp used for polling - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Get a tarball containing all images and tags in a repository: -+#### [Get a tarball containing all images and tags in a repository](#id36) - - `GET `{.descname}`/images/`{.descname}(*name*)`/get`{.descname} - : Get a tarball containing all images and metadata for the repository -@@ -1206,10 +1254,10 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --### Load a tarball with a set of images and tags into docker: -+#### [Load a tarball with a set of images and tags into docker](#id37) - - `POST `{.descname}`/images/load`{.descname} - : Load a set of images and tags into the docker repository. -@@ -1226,38 +1274,38 @@ page_keywords: API, Docker, rcli, REST, documentation - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - --## Going Further -+## [3. Going further](#id38) - --### Inside ‘docker run’ -+### [3.1 Inside ‘docker run’](#id39) - - Here are the steps of ‘docker run’ : - --- Create the container -+- Create the container - --- If the status code is 404, it means the image doesn’t exists: -- : - Try to pull it -- - Then retry to create the container -+- If the status code is 404, it means the image doesn’t exists: -+ : - Try to pull it -+ - Then retry to create the container - --- Start the container -+- Start the container - --- If you are not in detached mode: -- : - Attach to the container, using logs=1 (to have stdout and -+- If you are not in detached mode: -+ : - Attach to the container, using logs=1 (to have stdout and - stderr from the container’s start) and stream=1 - --- If in detached mode or only stdin is attached: -- : - Display the container’s id -+- If in detached mode or only stdin is attached: -+ : - Display the container’s id - --### Hijacking -+### [3.2 Hijacking](#id40) - - In this version of the API, /attach, uses hijacking to transport stdin, - stdout and stderr on the same socket. This might change in the future. - --### CORS Requests -+### [3.3 CORS Requests](#id41) - - To enable cross origin requests to the remote api add the flag --“-api-enable-cors” when running docker in daemon mode. -+“–api-enable-cors” when running docker in daemon mode. - -- docker -d -H="192.168.1.9:4243" -api-enable-cors -+ docker -d -H="192.168.1.9:4243" --api-enable-cors -diff --git a/docs/sources/reference/api/index_api.md b/docs/sources/reference/api/index_api.md -index 83cf36b..e9bcc2b 100644 ---- a/docs/sources/reference/api/index_api.md -+++ b/docs/sources/reference/api/index_api.md -@@ -4,17 +4,19 @@ page_keywords: API, Docker, index, REST, documentation - - # Docker Index API - --## Introduction -+## 1. 
Brief introduction - --- This is the REST API for the Docker index --- Authorization is done with basic auth over SSL --- Not all commands require authentication, only those noted as such. -+- This is the REST API for the Docker index -+- Authorization is done with basic auth over SSL -+- Not all commands require authentication, only those noted as such. - --## Repository -+## 2. Endpoints - --### Repositories -+### 2.1 Repository - --### User Repo -+#### Repositories -+ -+##### User Repo - - `PUT `{.descname}`/v1/repositories/`{.descname}(*namespace*)`/`{.descname}(*repo\_name*)`/`{.descname} - : Create a user repository with the given `namespace`{.docutils -@@ -33,8 +35,8 @@ page_keywords: API, Docker, index, REST, documentation - - Parameters: - -- - **namespace** – the namespace for the repo -- - **repo\_name** – the name for the repo -+ - **namespace** – the namespace for the repo -+ - **repo\_name** – the name for the repo - - **Example Response**: - -@@ -49,10 +51,10 @@ page_keywords: API, Docker, index, REST, documentation - - Status Codes: - -- - **200** – Created -- - **400** – Errors (invalid json, missing or invalid fields, etc) -- - **401** – Unauthorized -- - **403** – Account is not Active -+ - **200** – Created -+ - **400** – Errors (invalid json, missing or invalid fields, etc) -+ - **401** – Unauthorized -+ - **403** – Account is not Active - - `DELETE `{.descname}`/v1/repositories/`{.descname}(*namespace*)`/`{.descname}(*repo\_name*)`/`{.descname} - : Delete a user repository with the given `namespace`{.docutils -@@ -71,8 +73,8 @@ page_keywords: API, Docker, index, REST, documentation - - Parameters: - -- - **namespace** – the namespace for the repo -- - **repo\_name** – the name for the repo -+ - **namespace** – the namespace for the repo -+ - **repo\_name** – the name for the repo - - **Example Response**: - -@@ -87,13 +89,13 @@ page_keywords: API, Docker, index, REST, documentation - - Status Codes: - -- - **200** – Deleted -- - **202** – Accepted -- - **400** – Errors (invalid json, missing or invalid fields, etc) -- - **401** – Unauthorized -- - **403** – Account is not Active -+ - **200** – Deleted -+ - **202** – Accepted -+ - **400** – Errors (invalid json, missing or invalid fields, etc) -+ - **401** – Unauthorized -+ - **403** – Account is not Active - --### Library Repo -+##### Library Repo - - `PUT `{.descname}`/v1/repositories/`{.descname}(*repo\_name*)`/`{.descname} - : Create a library repository with the given `repo_name`{.docutils -@@ -116,7 +118,7 @@ page_keywords: API, Docker, index, REST, documentation - - Parameters: - -- - **repo\_name** – the library name for the repo -+ - **repo\_name** – the library name for the repo - - **Example Response**: - -@@ -131,10 +133,10 @@ page_keywords: API, Docker, index, REST, documentation - - Status Codes: - -- - **200** – Created -- - **400** – Errors (invalid json, missing or invalid fields, etc) -- - **401** – Unauthorized -- - **403** – Account is not Active -+ - **200** – Created -+ - **400** – Errors (invalid json, missing or invalid fields, etc) -+ - **401** – Unauthorized -+ - **403** – Account is not Active - - `DELETE `{.descname}`/v1/repositories/`{.descname}(*repo\_name*)`/`{.descname} - : Delete a library repository with the given `repo_name`{.docutils -@@ -157,7 +159,7 @@ page_keywords: API, Docker, index, REST, documentation - - Parameters: - -- - **repo\_name** – the library name for the repo -+ - **repo\_name** – the library name for the repo - - **Example Response**: - -@@ -172,15 +174,15 @@ 
page_keywords: API, Docker, index, REST, documentation - - Status Codes: - -- - **200** – Deleted -- - **202** – Accepted -- - **400** – Errors (invalid json, missing or invalid fields, etc) -- - **401** – Unauthorized -- - **403** – Account is not Active -+ - **200** – Deleted -+ - **202** – Accepted -+ - **400** – Errors (invalid json, missing or invalid fields, etc) -+ - **401** – Unauthorized -+ - **403** – Account is not Active - --### Repository Images -+#### Repository Images - --### User Repo Images -+##### User Repo Images - - `PUT `{.descname}`/v1/repositories/`{.descname}(*namespace*)`/`{.descname}(*repo\_name*)`/images`{.descname} - : Update the images for a user repo. -@@ -198,8 +200,8 @@ page_keywords: API, Docker, index, REST, documentation - - Parameters: - -- - **namespace** – the namespace for the repo -- - **repo\_name** – the name for the repo -+ - **namespace** – the namespace for the repo -+ - **repo\_name** – the name for the repo - - **Example Response**: - -@@ -211,10 +213,10 @@ page_keywords: API, Docker, index, REST, documentation - - Status Codes: - -- - **204** – Created -- - **400** – Errors (invalid json, missing or invalid fields, etc) -- - **401** – Unauthorized -- - **403** – Account is not Active or permission denied -+ - **204** – Created -+ - **400** – Errors (invalid json, missing or invalid fields, etc) -+ - **401** – Unauthorized -+ - **403** – Account is not Active or permission denied - - `GET `{.descname}`/v1/repositories/`{.descname}(*namespace*)`/`{.descname}(*repo\_name*)`/images`{.descname} - : get the images for a user repo. -@@ -227,8 +229,8 @@ page_keywords: API, Docker, index, REST, documentation - - Parameters: - -- - **namespace** – the namespace for the repo -- - **repo\_name** – the name for the repo -+ - **namespace** – the namespace for the repo -+ - **repo\_name** – the name for the repo - - **Example Response**: - -@@ -243,10 +245,10 @@ page_keywords: API, Docker, index, REST, documentation - - Status Codes: - -- - **200** – OK -- - **404** – Not found -+ - **200** – OK -+ - **404** – Not found - --### Library Repo Images -+##### Library Repo Images - - `PUT `{.descname}`/v1/repositories/`{.descname}(*repo\_name*)`/images`{.descname} - : Update the images for a library repo. -@@ -264,7 +266,7 @@ page_keywords: API, Docker, index, REST, documentation - - Parameters: - -- - **repo\_name** – the library name for the repo -+ - **repo\_name** – the library name for the repo - - **Example Response**: - -@@ -276,10 +278,10 @@ page_keywords: API, Docker, index, REST, documentation - - Status Codes: - -- - **204** – Created -- - **400** – Errors (invalid json, missing or invalid fields, etc) -- - **401** – Unauthorized -- - **403** – Account is not Active or permission denied -+ - **204** – Created -+ - **400** – Errors (invalid json, missing or invalid fields, etc) -+ - **401** – Unauthorized -+ - **403** – Account is not Active or permission denied - - `GET `{.descname}`/v1/repositories/`{.descname}(*repo\_name*)`/images`{.descname} - : get the images for a library repo. 
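The parameters and status codes for this endpoint follow in the next hunk. As a small illustration of consuming the images endpoints, the Go sketch below fetches and decodes an image list; the index address and the `ubuntu` library repository are placeholders, and the `imageRef` struct simply mirrors the id/checksum entries these endpoints return.

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
        "net/http"
    )

    // imageRef mirrors the entries returned by the images endpoints: a list of
    // objects carrying an image id and, optionally, its checksum.
    type imageRef struct {
        ID       string `json:"id"`
        Checksum string `json:"checksum"`
    }

    func main() {
        // Placeholders for the sketch: the public index address and the "ubuntu"
        // library repository; a user repo adds the namespace path segment.
        resp, err := http.Get("https://index.docker.io/v1/repositories/ubuntu/images")
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        if resp.StatusCode != http.StatusOK {
            log.Fatalf("unexpected status: %s", resp.Status)
        }

        var images []imageRef
        if err := json.NewDecoder(resp.Body).Decode(&images); err != nil {
            log.Fatal(err)
        }
        for _, img := range images {
            fmt.Println(img.ID, img.Checksum)
        }
    }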
-@@ -292,7 +294,7 @@ page_keywords: API, Docker, index, REST, documentation - - Parameters: - -- - **repo\_name** – the library name for the repo -+ - **repo\_name** – the library name for the repo - - **Example Response**: - -@@ -307,12 +309,12 @@ page_keywords: API, Docker, index, REST, documentation - - Status Codes: - -- - **200** – OK -- - **404** – Not found -+ - **200** – OK -+ - **404** – Not found - --### Repository Authorization -+#### Repository Authorization - --### Library Repo -+##### Library Repo - - `PUT `{.descname}`/v1/repositories/`{.descname}(*repo\_name*)`/auth`{.descname} - : authorize a token for a library repo -@@ -326,7 +328,7 @@ page_keywords: API, Docker, index, REST, documentation - - Parameters: - -- - **repo\_name** – the library name for the repo -+ - **repo\_name** – the library name for the repo - - **Example Response**: - -@@ -338,11 +340,11 @@ page_keywords: API, Docker, index, REST, documentation - - Status Codes: - -- - **200** – OK -- - **403** – Permission denied -- - **404** – Not found -+ - **200** – OK -+ - **403** – Permission denied -+ - **404** – Not found - --### User Repo -+##### User Repo - - `PUT `{.descname}`/v1/repositories/`{.descname}(*namespace*)`/`{.descname}(*repo\_name*)`/auth`{.descname} - : authorize a token for a user repo -@@ -356,8 +358,8 @@ page_keywords: API, Docker, index, REST, documentation - - Parameters: - -- - **namespace** – the namespace for the repo -- - **repo\_name** – the name for the repo -+ - **namespace** – the namespace for the repo -+ - **repo\_name** – the name for the repo - - **Example Response**: - -@@ -369,13 +371,13 @@ page_keywords: API, Docker, index, REST, documentation - - Status Codes: - -- - **200** – OK -- - **403** – Permission denied -- - **404** – Not found -+ - **200** – OK -+ - **403** – Permission denied -+ - **404** – Not found - --### Users -+### 2.2 Users - --### User Login -+#### User Login - - `GET `{.descname}`/v1/users`{.descname} - : If you want to check your login, you can try this endpoint -@@ -397,11 +399,11 @@ page_keywords: API, Docker, index, REST, documentation - - Status Codes: - -- - **200** – no error -- - **401** – Unauthorized -- - **403** – Account is not Active -+ - **200** – no error -+ - **401** – Unauthorized -+ - **403** – Account is not Active - --### User Register -+#### User Register - - `POST `{.descname}`/v1/users`{.descname} - : Registering a new account. -@@ -421,10 +423,10 @@ page_keywords: API, Docker, index, REST, documentation - -   - -- - **email** – valid email address, that needs to be confirmed -- - **username** – min 4 character, max 30 characters, must match -+ - **email** – valid email address, that needs to be confirmed -+ - **username** – min 4 character, max 30 characters, must match - the regular expression [a-z0-9\_]. -- - **password** – min 5 characters -+ - **password** – min 5 characters - - **Example Response**: - -@@ -436,10 +438,10 @@ page_keywords: API, Docker, index, REST, documentation - - Status Codes: - -- - **201** – User Created -- - **400** – Errors (invalid json, missing or invalid fields, etc) -+ - **201** – User Created -+ - **400** – Errors (invalid json, missing or invalid fields, etc) - --### Update User -+#### Update User - - `PUT `{.descname}`/v1/users/`{.descname}(*username*)`/`{.descname} - : Change a password or email address for given user. 
If you pass in an -@@ -463,7 +465,7 @@ page_keywords: API, Docker, index, REST, documentation - - Parameters: - -- - **username** – username for the person you want to update -+ - **username** – username for the person you want to update - - **Example Response**: - -@@ -475,17 +477,17 @@ page_keywords: API, Docker, index, REST, documentation - - Status Codes: - -- - **204** – User Updated -- - **400** – Errors (invalid json, missing or invalid fields, etc) -- - **401** – Unauthorized -- - **403** – Account is not Active -- - **404** – User not found -+ - **204** – User Updated -+ - **400** – Errors (invalid json, missing or invalid fields, etc) -+ - **401** – Unauthorized -+ - **403** – Account is not Active -+ - **404** – User not found - --## Search -+### 2.3 Search - - If you need to search the index, this is the endpoint you would use. - --### Search -+#### Search - - `GET `{.descname}`/v1/search`{.descname} - : Search the Index given a search term. It accepts -@@ -515,11 +517,13 @@ If you need to search the index, this is the endpoint you would use. - - Query Parameters: - -- - **q** – what you want to search for -+   -+ -+ - **q** – what you want to search for - - Status Codes: - -- - **200** – no error -- - **500** – server error -+ - **200** – no error -+ - **500** – server error - - -diff --git a/docs/sources/reference/api/registry_api.md b/docs/sources/reference/api/registry_api.md -index e067586..f251169 100644 ---- a/docs/sources/reference/api/registry_api.md -+++ b/docs/sources/reference/api/registry_api.md -@@ -4,34 +4,34 @@ page_keywords: API, Docker, index, registry, REST, documentation - - # Docker Registry API - --## Introduction -+## 1. Brief introduction - --- This is the REST API for the Docker Registry --- It stores the images and the graph for a set of repositories --- It does not have user accounts data --- It has no notion of user accounts or authorization --- It delegates authentication and authorization to the Index Auth -+- This is the REST API for the Docker Registry -+- It stores the images and the graph for a set of repositories -+- It does not have user accounts data -+- It has no notion of user accounts or authorization -+- It delegates authentication and authorization to the Index Auth - service using tokens --- It supports different storage backends (S3, cloud files, local FS) --- It doesn’t have a local database --- It will be open-sourced at some point -+- It supports different storage backends (S3, cloud files, local FS) -+- It doesn’t have a local database -+- It will be open-sourced at some point - - We expect that there will be multiple registries out there. To help to - grasp the context, here are some examples of registries: - --- **sponsor registry**: such a registry is provided by a third-party -+- **sponsor registry**: such a registry is provided by a third-party - hosting infrastructure as a convenience for their customers and the - docker community as a whole. Its costs are supported by the third - party, but the management and operation of the registry are - supported by dotCloud. It features read/write access, and delegates - authentication and authorization to the Index. --- **mirror registry**: such a registry is provided by a third-party -+- **mirror registry**: such a registry is provided by a third-party - hosting infrastructure but is targeted at their customers only. 
Some - mechanism (unspecified to date) ensures that public images are - pulled from a sponsor registry to the mirror registry, to make sure - that the customers of the third-party provider can “docker pull” - those images locally. --- **vendor registry**: such a registry is provided by a software -+- **vendor registry**: such a registry is provided by a software - vendor, who wants to distribute docker images. It would be operated - and managed by the vendor. Only users authorized by the vendor would - be able to get write access. Some images would be public (accessible -@@ -41,7 +41,7 @@ grasp the context, here are some examples of registries: - basho/riak1.3” and automatically push from the vendor registry - (instead of a sponsor registry); i.e. get all the convenience of a - sponsor registry, while retaining control on the asset distribution. --- **private registry**: such a registry is located behind a firewall, -+- **private registry**: such a registry is located behind a firewall, - or protected by an additional security layer (HTTP authorization, - SSL client-side certificates, IP address authorization...). The - registry is operated by a private entity, outside of dotCloud’s -@@ -58,9 +58,9 @@ can be powered by a simple static HTTP server. - Note - - The latter implies that while HTTP is the protocol of choice for a registry, multiple schemes are possible (and in some cases, trivial): --: - HTTP with GET (and PUT for read-write registries); -- - local mount point; -- - remote docker addressed through SSH. -+: - HTTP with GET (and PUT for read-write registries); -+ - local mount point; -+ - remote docker addressed through SSH. - - The latter would only require two new commands in docker, e.g. - `registryget` and `registryput`{.docutils .literal}, -@@ -68,11 +68,11 @@ wrapping access to the local filesystem (and optionally doing - consistency checks). Authentication and authorization are then delegated - to SSH (e.g. with public keys). - --## Endpoints -+## 2. Endpoints - --### Images -+### 2.1 Images - --### Layer -+#### Layer - - `GET `{.descname}`/v1/images/`{.descname}(*image\_id*)`/layer`{.descname} - : get image layer for a given `image_id` -@@ -87,7 +87,7 @@ to SSH (e.g. with public keys). - - Parameters: - -- - **image\_id** – the id for the layer you want to get -+ - **image\_id** – the id for the layer you want to get - - **Example Response**: - -@@ -100,9 +100,9 @@ to SSH (e.g. with public keys). - - Status Codes: - -- - **200** – OK -- - **401** – Requires authorization -- - **404** – Image not found -+ - **200** – OK -+ - **401** – Requires authorization -+ - **404** – Image not found - - `PUT `{.descname}`/v1/images/`{.descname}(*image\_id*)`/layer`{.descname} - : put image layer for a given `image_id` -@@ -118,7 +118,7 @@ to SSH (e.g. with public keys). - - Parameters: - -- - **image\_id** – the id for the layer you want to get -+ - **image\_id** – the id for the layer you want to get - - **Example Response**: - -@@ -131,11 +131,11 @@ to SSH (e.g. with public keys). - - Status Codes: - -- - **200** – OK -- - **401** – Requires authorization -- - **404** – Image not found -+ - **200** – OK -+ - **401** – Requires authorization -+ - **404** – Image not found - --### Image -+#### Image - - `PUT `{.descname}`/v1/images/`{.descname}(*image\_id*)`/json`{.descname} - : put image for a given `image_id` -@@ -181,7 +181,7 @@ to SSH (e.g. with public keys). 
- - Parameters: - -- - **image\_id** – the id for the layer you want to get -+ - **image\_id** – the id for the layer you want to get - - **Example Response**: - -@@ -194,8 +194,8 @@ to SSH (e.g. with public keys). - - Status Codes: - -- - **200** – OK -- - **401** – Requires authorization -+ - **200** – OK -+ - **401** – Requires authorization - - `GET `{.descname}`/v1/images/`{.descname}(*image\_id*)`/json`{.descname} - : get image for a given `image_id` -@@ -210,7 +210,7 @@ to SSH (e.g. with public keys). - - Parameters: - -- - **image\_id** – the id for the layer you want to get -+ - **image\_id** – the id for the layer you want to get - - **Example Response**: - -@@ -254,11 +254,11 @@ to SSH (e.g. with public keys). - - Status Codes: - -- - **200** – OK -- - **401** – Requires authorization -- - **404** – Image not found -+ - **200** – OK -+ - **401** – Requires authorization -+ - **404** – Image not found - --### Ancestry -+#### Ancestry - - `GET `{.descname}`/v1/images/`{.descname}(*image\_id*)`/ancestry`{.descname} - : get ancestry for an image given an `image_id` -@@ -273,7 +273,7 @@ to SSH (e.g. with public keys). - - Parameters: - -- - **image\_id** – the id for the layer you want to get -+ - **image\_id** – the id for the layer you want to get - - **Example Response**: - -@@ -289,11 +289,11 @@ to SSH (e.g. with public keys). - - Status Codes: - -- - **200** – OK -- - **401** – Requires authorization -- - **404** – Image not found -+ - **200** – OK -+ - **401** – Requires authorization -+ - **404** – Image not found - --### Tags -+### 2.2 Tags - - `GET `{.descname}`/v1/repositories/`{.descname}(*namespace*)`/`{.descname}(*repository*)`/tags`{.descname} - : get all of the tags for the given repo. -@@ -309,8 +309,8 @@ to SSH (e.g. with public keys). - - Parameters: - -- - **namespace** – namespace for the repo -- - **repository** – name for the repo -+ - **namespace** – namespace for the repo -+ - **repository** – name for the repo - - **Example Response**: - -@@ -326,9 +326,9 @@ to SSH (e.g. with public keys). - - Status Codes: - -- - **200** – OK -- - **401** – Requires authorization -- - **404** – Repository not found -+ - **200** – OK -+ - **401** – Requires authorization -+ - **404** – Repository not found - - `GET `{.descname}`/v1/repositories/`{.descname}(*namespace*)`/`{.descname}(*repository*)`/tags/`{.descname}(*tag*) - : get a tag for the given repo. -@@ -344,9 +344,9 @@ to SSH (e.g. with public keys). - - Parameters: - -- - **namespace** – namespace for the repo -- - **repository** – name for the repo -- - **tag** – name of tag you want to get -+ - **namespace** – namespace for the repo -+ - **repository** – name for the repo -+ - **tag** – name of tag you want to get - - **Example Response**: - -@@ -359,9 +359,9 @@ to SSH (e.g. with public keys). - - Status Codes: - -- - **200** – OK -- - **401** – Requires authorization -- - **404** – Tag not found -+ - **200** – OK -+ - **401** – Requires authorization -+ - **404** – Tag not found - - `DELETE `{.descname}`/v1/repositories/`{.descname}(*namespace*)`/`{.descname}(*repository*)`/tags/`{.descname}(*tag*) - : delete the tag for the repo -@@ -376,9 +376,9 @@ to SSH (e.g. with public keys). 
- - Parameters: - -- - **namespace** – namespace for the repo -- - **repository** – name for the repo -- - **tag** – name of tag you want to delete -+ - **namespace** – namespace for the repo -+ - **repository** – name for the repo -+ - **tag** – name of tag you want to delete - - **Example Response**: - -@@ -391,9 +391,9 @@ to SSH (e.g. with public keys). - - Status Codes: - -- - **200** – OK -- - **401** – Requires authorization -- - **404** – Tag not found -+ - **200** – OK -+ - **401** – Requires authorization -+ - **404** – Tag not found - - `PUT `{.descname}`/v1/repositories/`{.descname}(*namespace*)`/`{.descname}(*repository*)`/tags/`{.descname}(*tag*) - : put a tag for the given repo. -@@ -410,9 +410,9 @@ to SSH (e.g. with public keys). - - Parameters: - -- - **namespace** – namespace for the repo -- - **repository** – name for the repo -- - **tag** – name of tag you want to add -+ - **namespace** – namespace for the repo -+ - **repository** – name for the repo -+ - **tag** – name of tag you want to add - - **Example Response**: - -@@ -425,12 +425,12 @@ to SSH (e.g. with public keys). - - Status Codes: - -- - **200** – OK -- - **400** – Invalid data -- - **401** – Requires authorization -- - **404** – Image not found -+ - **200** – OK -+ - **400** – Invalid data -+ - **401** – Requires authorization -+ - **404** – Image not found - --### Repositories -+### 2.3 Repositories - - `DELETE `{.descname}`/v1/repositories/`{.descname}(*namespace*)`/`{.descname}(*repository*)`/`{.descname} - : delete a repository -@@ -447,8 +447,8 @@ to SSH (e.g. with public keys). - - Parameters: - -- - **namespace** – namespace for the repo -- - **repository** – name for the repo -+ - **namespace** – namespace for the repo -+ - **repository** – name for the repo - - **Example Response**: - -@@ -461,11 +461,11 @@ to SSH (e.g. with public keys). - - Status Codes: - -- - **200** – OK -- - **401** – Requires authorization -- - **404** – Repository not found -+ - **200** – OK -+ - **401** – Requires authorization -+ - **404** – Repository not found - --### Status -+### 2.4 Status - - `GET `{.descname}`/v1/_ping`{.descname} - : Check status of the registry. This endpoint is also used to -@@ -491,9 +491,9 @@ to SSH (e.g. with public keys). - - Status Codes: - -- - **200** – OK -+ - **200** – OK - --## Authorization -+## 3 Authorization - - This is where we describe the authorization process, including the - tokens and cookies. -diff --git a/docs/sources/reference/api/registry_index_spec.md b/docs/sources/reference/api/registry_index_spec.md -index dc0dd80..281fe07 100644 ---- a/docs/sources/reference/api/registry_index_spec.md -+++ b/docs/sources/reference/api/registry_index_spec.md -@@ -4,55 +4,55 @@ page_keywords: docker, registry, api, index - - # Registry & Index Spec - --## The 3 roles -+## 1. The 3 roles - --### Index -+### 1.1 Index - - The Index is responsible for centralizing information about: - --- User accounts --- Checksums of the images --- Public namespaces -+- User accounts -+- Checksums of the images -+- Public namespaces - - The Index has different components: - --- Web UI --- Meta-data store (comments, stars, list public repositories) --- Authentication service --- Tokenization -+- Web UI -+- Meta-data store (comments, stars, list public repositories) -+- Authentication service -+- Tokenization - - The index is authoritative for those information. - - We expect that there will be only one instance of the index, run and - managed by Docker Inc. 
- --### Registry -+### 1.2 Registry - --- It stores the images and the graph for a set of repositories --- It does not have user accounts data --- It has no notion of user accounts or authorization --- It delegates authentication and authorization to the Index Auth -+- It stores the images and the graph for a set of repositories -+- It does not have user accounts data -+- It has no notion of user accounts or authorization -+- It delegates authentication and authorization to the Index Auth - service using tokens --- It supports different storage backends (S3, cloud files, local FS) --- It doesn’t have a local database --- [Source Code](https://github.com/dotcloud/docker-registry) -+- It supports different storage backends (S3, cloud files, local FS) -+- It doesn’t have a local database -+- [Source Code](https://github.com/dotcloud/docker-registry) - - We expect that there will be multiple registries out there. To help to - grasp the context, here are some examples of registries: - --- **sponsor registry**: such a registry is provided by a third-party -+- **sponsor registry**: such a registry is provided by a third-party - hosting infrastructure as a convenience for their customers and the - docker community as a whole. Its costs are supported by the third - party, but the management and operation of the registry are - supported by dotCloud. It features read/write access, and delegates - authentication and authorization to the Index. --- **mirror registry**: such a registry is provided by a third-party -+- **mirror registry**: such a registry is provided by a third-party - hosting infrastructure but is targeted at their customers only. Some - mechanism (unspecified to date) ensures that public images are - pulled from a sponsor registry to the mirror registry, to make sure - that the customers of the third-party provider can “docker pull” - those images locally. --- **vendor registry**: such a registry is provided by a software -+- **vendor registry**: such a registry is provided by a software - vendor, who wants to distribute docker images. It would be operated - and managed by the vendor. Only users authorized by the vendor would - be able to get write access. Some images would be public (accessible -@@ -62,20 +62,19 @@ grasp the context, here are some examples of registries: - basho/riak1.3” and automatically push from the vendor registry - (instead of a sponsor registry); i.e. get all the convenience of a - sponsor registry, while retaining control on the asset distribution. --- **private registry**: such a registry is located behind a firewall, -+- **private registry**: such a registry is located behind a firewall, - or protected by an additional security layer (HTTP authorization, - SSL client-side certificates, IP address authorization...). The - registry is operated by a private entity, outside of dotCloud’s - control. It can optionally delegate additional authorization to the - Index, but it is not mandatory. - --> **Note:** The latter implies that while HTTP is the protocol --> of choice for a registry, multiple schemes are possible (and --> in some cases, trivial): --> --> - HTTP with GET (and PUT for read-write registries); --> - local mount point; --> - remote docker addressed through SSH. -+Note -+ -+The latter implies that while HTTP is the protocol of choice for a registry, multiple schemes are possible (and in some cases, trivial): -+: - HTTP with GET (and PUT for read-write registries); -+ - local mount point; -+ - remote docker addressed through SSH. 
- - The latter would only require two new commands in docker, e.g. - `registryget` and `registryput`{.docutils .literal}, -@@ -83,17 +82,17 @@ wrapping access to the local filesystem (and optionally doing - consistency checks). Authentication and authorization are then delegated - to SSH (e.g. with public keys). - --### Docker -+### 1.3 Docker - - On top of being a runtime for LXC, Docker is the Registry client. It - supports: - --- Push / Pull on the registry --- Client authentication on the Index -+- Push / Pull on the registry -+- Client authentication on the Index - --## Workflow -+## 2. Workflow - --### Pull -+### 2.1 Pull - - ![](../../../_images/docker_pull_chart.png) - -@@ -147,9 +146,9 @@ and for an active account. - 2. (Index -\> Docker) HTTP 200 OK - - > **Headers**: -- > : - Authorization: Token -+ > : - Authorization: Token - > signature=123abc,repository=”foo/bar”,access=write -- > - X-Docker-Endpoints: registry.docker.io [, -+ > - X-Docker-Endpoints: registry.docker.io [, - > registry2.docker.io] - > - > **Body**: -@@ -188,7 +187,7 @@ Note - If someone makes a second request, then we will always give a new token, - never reuse tokens. - --### Push -+### 2.2 Push - - ![](../../../_images/docker_push_chart.png) - -@@ -204,15 +203,17 @@ never reuse tokens. - pushed by docker and store the repository (with its images) - 6. docker contacts the index to give checksums for upload images - --> **Note:** --> **It’s possible not to use the Index at all!** In this case, a deployed --> version of the Registry is deployed to store and serve images. Those --> images are not authenticated and the security is not guaranteed. -+Note -+ -+**It’s possible not to use the Index at all!** In this case, a deployed -+version of the Registry is deployed to store and serve images. Those -+images are not authenticated and the security is not guaranteed. -+ -+Note - --> **Note:** --> **Index can be replaced!** For a private Registry deployed, a custom --> Index can be used to serve and validate token according to different --> policies. -+**Index can be replaced!** For a private Registry deployed, a custom -+Index can be used to serve and validate token according to different -+policies. - - Docker computes the checksums and submit them to the Index at the end of - the push. When a repository name does not have checksums on the Index, -@@ -227,7 +228,7 @@ the end). - true - - **Action**:: -- : - in index, we allocated a new repository, and set to -+ : - in index, we allocated a new repository, and set to - initialized - - **Body**:: -@@ -239,9 +240,9 @@ the end). - - 2. (Index -\> Docker) 200 Created - : **Headers**: -- : - WWW-Authenticate: Token -+ : - WWW-Authenticate: Token - signature=123abc,repository=”foo/bar”,access=write -- - X-Docker-Endpoints: registry.docker.io [, -+ - X-Docker-Endpoints: registry.docker.io [, - registry2.docker.io] - - 3. (Docker -\> Registry) PUT /v1/images/98765432\_parent/json -@@ -255,18 +256,18 @@ the end). - signature=123abc,repository=”foo/bar”,access=write - - **Action**:: -- : - Index: -+ : - Index: - : will invalidate the token. - -- - Registry: -+ - Registry: - : grants a session (if token is approved) and fetches - the images id - - 5. (Docker -\> Registry) PUT /v1/images/98765432\_parent/json - : **Headers**:: -- : - Authorization: Token -+ : - Authorization: Token - signature=123abc,repository=”foo/bar”,access=write -- - Cookie: (Cookie provided by the Registry) -+ - Cookie: (Cookie provided by the Registry) - - 6. 
(Docker -\> Registry) PUT /v1/images/98765432/json - : **Headers**: -@@ -303,17 +304,19 @@ the end). - - **Return** HTTP 204 - --> **Note:** If push fails and they need to start again, what happens in the index, --> there will already be a record for the namespace/name, but it will be --> initialized. Should we allow it, or mark as name already used? One edge --> case could be if someone pushes the same thing at the same time with two --> different shells. -+Note -+ -+If push fails and they need to start again, what happens in the index, -+there will already be a record for the namespace/name, but it will be -+initialized. Should we allow it, or mark as name already used? One edge -+case could be if someone pushes the same thing at the same time with two -+different shells. - - If it’s a retry on the Registry, Docker has a cookie (provided by the - registry after token validation). So the Index won’t have to provide a - new token. - --### Delete -+### 2.3 Delete - - If you need to delete something from the index or registry, we need a - nice clean way to do that. Here is the workflow. -@@ -333,9 +336,11 @@ nice clean way to do that. Here is the workflow. - 6. docker contacts the index to let it know it was removed from the - registry, the index removes all records from the database. - --> **Note:** The Docker client should present an “Are you sure?” prompt to confirm --> the deletion before starting the process. Once it starts it can’t be --> undone. -+Note -+ -+The Docker client should present an “Are you sure?” prompt to confirm -+the deletion before starting the process. Once it starts it can’t be -+undone. - - #### API (deleting repository foo/bar): - -@@ -345,7 +350,7 @@ nice clean way to do that. Here is the workflow. - true - - **Action**:: -- : - in index, we make sure it is a valid repository, and set -+ : - in index, we make sure it is a valid repository, and set - to deleted (logically) - - **Body**:: -@@ -353,9 +358,9 @@ nice clean way to do that. Here is the workflow. - - 2. (Index -\> Docker) 202 Accepted - : **Headers**: -- : - WWW-Authenticate: Token -+ : - WWW-Authenticate: Token - signature=123abc,repository=”foo/bar”,access=delete -- - X-Docker-Endpoints: registry.docker.io [, -+ - X-Docker-Endpoints: registry.docker.io [, - registry2.docker.io] \# list of endpoints where this - repo lives. - -@@ -370,10 +375,10 @@ nice clean way to do that. Here is the workflow. - signature=123abc,repository=”foo/bar”,access=delete - - **Action**:: -- : - Index: -+ : - Index: - : will invalidate the token. - -- - Registry: -+ - Registry: - : deletes the repository (if token is approved) - - 5. (Registry -\> Docker) 200 OK -@@ -391,20 +396,20 @@ nice clean way to do that. Here is the workflow. - > - > **Return** HTTP 200 - --## How to use the Registry in standalone mode -+## 3. 
How to use the Registry in standalone mode - - The Index has two main purposes (along with its fancy social features): - --- Resolve short names (to avoid passing absolute URLs all the time) -- : - username/projectname -\> -+- Resolve short names (to avoid passing absolute URLs all the time) -+ : - username/projectname -\> - https://registry.docker.io/users/\/repositories/\/ -- - team/projectname -\> -+ - team/projectname -\> - https://registry.docker.io/team/\/repositories/\/ - --- Authenticate a user as a repos owner (for a central referenced -+- Authenticate a user as a repos owner (for a central referenced - repository) - --### Without an Index -+### 3.1 Without an Index - - Using the Registry without the Index can be useful to store the images - on a private network without having to rely on an external entity -@@ -425,12 +430,12 @@ As hinted previously, a standalone registry can also be implemented by - any HTTP server handling GET/PUT requests (or even only GET requests if - no write access is necessary). - --### With an Index -+### 3.2 With an Index - - The Index data needed by the Registry are simple: - --- Serve the checksums --- Provide and authorize a Token -+- Serve the checksums -+- Provide and authorize a Token - - In the scenario of a Registry running on a private network with the need - of centralizing and authorizing, it’s easy to use a custom Index. -@@ -441,12 +446,12 @@ specific Index, it’ll be the private entity responsibility (basically - the organization who uses Docker in a private environment) to maintain - the Index and the Docker’s configuration among its consumers. - --## The API -+## 4. The API - - The first version of the api is available here: - [https://github.com/jpetazzo/docker/blob/acd51ecea8f5d3c02b00a08176171c59442df8b3/docs/images-repositories-push-pull.md](https://github.com/jpetazzo/docker/blob/acd51ecea8f5d3c02b00a08176171c59442df8b3/docs/images-repositories-push-pull.md) - --### Images -+### 4.1 Images - - The format returned in the images is not defined here (for layer and - JSON), basically because Registry stores exactly the same kind of -@@ -464,9 +469,9 @@ file is empty. - GET /v1/images//ancestry - PUT /v1/images//ancestry - --### Users -+### 4.2 Users - --### Create a user (Index) -+#### 4.2.1 Create a user (Index) - - POST /v1/users - -@@ -474,9 +479,9 @@ POST /v1/users - : {“email”: “[sam@dotcloud.com](mailto:sam%40dotcloud.com)”, - “password”: “toto42”, “username”: “foobar”’} - **Validation**: --: - **username**: min 4 character, max 30 characters, must match the -+: - **username**: min 4 character, max 30 characters, must match the - regular expression [a-z0-9\_]. -- - **password**: min 5 characters -+ - **password**: min 5 characters - - **Valid**: return HTTP 200 - -@@ -489,7 +494,7 @@ Note - A user account will be valid only if the email has been validated (a - validation link is sent to the email address). - --### Update a user (Index) -+#### 4.2.2 Update a user (Index) - - PUT /v1/users/\ - -@@ -501,7 +506,7 @@ Note - We can also update email address, if they do, they will need to reverify - their new email address. - --### Login (Index) -+#### 4.2.3 Login (Index) - - Does nothing else but asking for a user authentication. Can be used to - validate credentials. HTTP Basic Auth for now, maybe change in future. -@@ -509,11 +514,11 @@ validate credentials. HTTP Basic Auth for now, maybe change in future. 
- GET /v1/users - - **Return**: --: - Valid: HTTP 200 -- - Invalid login: HTTP 401 -- - Account inactive: HTTP 403 Account is not Active -+: - Valid: HTTP 200 -+ - Invalid login: HTTP 401 -+ - Account inactive: HTTP 403 Account is not Active - --### Tags (Registry) -+### 4.3 Tags (Registry) - - The Registry does not know anything about users. Even though - repositories are under usernames, it’s just a namespace for the -@@ -522,11 +527,11 @@ per user later, without modifying the Registry’s API. - - The following naming restrictions apply: - --- Namespaces must match the same regular expression as usernames (See -+- Namespaces must match the same regular expression as usernames (See - 4.2.1.) --- Repository names must match the regular expression [a-zA-Z0-9-\_.] -+- Repository names must match the regular expression [a-zA-Z0-9-\_.] - --### Get all tags: -+#### 4.3.1 Get all tags - - GET /v1/repositories/\/\/tags - -@@ -536,25 +541,25 @@ GET /v1/repositories/\/\/tags - “0.1.1”: - “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087” } - --### Read the content of a tag (resolve the image id): -+#### 4.3.2 Read the content of a tag (resolve the image id) - - GET /v1/repositories/\/\/tags/\ - - **Return**: - : “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f” - --### Delete a tag (registry): -+#### 4.3.3 Delete a tag (registry) - - DELETE /v1/repositories/\/\/tags/\ - --## Images (Index) -+### 4.4 Images (Index) - - For the Index to “resolve” the repository name to a Registry location, - it uses the X-Docker-Endpoints header. In other terms, this requests - always add a `X-Docker-Endpoints` to indicate the - location of the registry which hosts this repository. - --### Get the images: -+#### 4.4.1 Get the images - - GET /v1/repositories/\/\/images - -@@ -562,9 +567,9 @@ GET /v1/repositories/\/\/images - : [{“id”: - “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”, - “checksum”: -- “[md5:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087](md5:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087)”}] -+ “”}] - --### Add/update the images: -+#### 4.4.2 Add/update the images - - You always add images, you never remove them. - -@@ -579,15 +584,15 @@ PUT /v1/repositories/\/\/images - - **Return** 204 - --### Repositories -+### 4.5 Repositories - --### Remove a Repository (Registry) -+#### 4.5.1 Remove a Repository (Registry) - - DELETE /v1/repositories/\/\ - - Return 200 OK - --### Remove a Repository (Index) -+#### 4.5.2 Remove a Repository (Index) - - This starts the delete process. see 2.3 for more details. - -@@ -595,12 +600,12 @@ DELETE /v1/repositories/\/\ - - Return 202 OK - --## Chaining Registries -+## 5. Chaining Registries - - It’s possible to chain Registries server for several reasons: - --- Load balancing --- Delegate the next request to another server -+- Load balancing -+- Delegate the next request to another server - - When a Registry is a reference for a repository, it should host the - entire images chain in order to avoid breaking the chain during the -@@ -618,9 +623,9 @@ On every request, a special header can be returned: - On the next request, the client will always pick a server from this - list. - --## Authentication & Authorization -+## 6. Authentication & Authorization - --### On the Index -+### 6.1 On the Index - - The Index supports both “Basic” and “Token” challenges. Usually when - there is a `401 Unauthorized`, the Index replies -@@ -634,16 +639,16 @@ You have 3 options: - 1. 
Provide user credentials and ask for a token - - > **Header**: -- > : - Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== -- > - X-Docker-Token: true -+ > : - Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== -+ > - X-Docker-Token: true - > - > In this case, along with the 200 response, you’ll get a new token - > (if user auth is ok): If authorization isn’t correct you get a 401 - > response. If account isn’t active you will get a 403 response. - > - > **Response**: -- > : - 200 OK -- > - X-Docker-Token: Token -+ > : - 200 OK -+ > - X-Docker-Token: Token - > signature=123abc,repository=”foo/bar”,access=read - > - 2. Provide user credentials only -@@ -681,9 +686,9 @@ Next request: - GET /(...) - Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=×tamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4=" - --## Document Version -+## 7 Document Version - --- 1.0 : May 6th 2013 : initial release --- 1.1 : June 1st 2013 : Added Delete Repository and way to handle new -+- 1.0 : May 6th 2013 : initial release -+- 1.1 : June 1st 2013 : Added Delete Repository and way to handle new - source namespace. - -diff --git a/docs/sources/reference/api/remote_api_client_libraries.md b/docs/sources/reference/api/remote_api_client_libraries.md -index 0392da3..4991924 100644 ---- a/docs/sources/reference/api/remote_api_client_libraries.md -+++ b/docs/sources/reference/api/remote_api_client_libraries.md -@@ -4,115 +4,82 @@ page_keywords: API, Docker, index, registry, REST, documentation, clients, Pytho - - # Docker Remote API Client Libraries - --## Introduction -- - These libraries have not been tested by the Docker Maintainers for - compatibility. Please file issues with the library owners. If you find - more library implementations, please list them in Docker doc bugs and we - will add the libraries here. 
- --Language/Framework -- --Name -- --Repository -- --Status -- --Python -- --docker-py -- --[https://github.com/dotcloud/docker-py](https://github.com/dotcloud/docker-py) -- --Active -- --Ruby -- --docker-client -- --[https://github.com/geku/docker-client](https://github.com/geku/docker-client) -- --Outdated -- --Ruby -- --docker-api -- --[https://github.com/swipely/docker-api](https://github.com/swipely/docker-api) -- --Active -- --JavaScript (NodeJS) -- --dockerode -- --[https://github.com/apocas/dockerode](https://github.com/apocas/dockerode) --Install via NPM: npm install dockerode -- --Active -- --JavaScript (NodeJS) -- --docker.io -- --[https://github.com/appersonlabs/docker.io](https://github.com/appersonlabs/docker.io) --Install via NPM: npm install docker.io -- --Active -- --JavaScript -- --docker-js -- --[https://github.com/dgoujard/docker-js](https://github.com/dgoujard/docker-js) -- --Active -- --JavaScript (Angular) **WebUI** -- --docker-cp -- --[https://github.com/13W/docker-cp](https://github.com/13W/docker-cp) -- --Active -- --JavaScript (Angular) **WebUI** -- --dockerui -- --[https://github.com/crosbymichael/dockerui](https://github.com/crosbymichael/dockerui) -+ ------------------------------------------------------------------------- -+ Language/Framewor Name Repository Status -+ k -+ ----------------- ------------ ---------------------------------- ------- -+ Python docker-py [https://github.com/dotcloud/docke Active -+ r-py](https://github.com/dotcloud/ -+ docker-py) - --Active -+ Ruby docker-clien [https://github.com/geku/docker-cl Outdate -+ t ient](https://github.com/geku/dock d -+ er-client) - --Java -+ Ruby docker-api [https://github.com/swipely/docker Active -+ -api](https://github.com/swipely/d -+ ocker-api) - --docker-java -+ JavaScript dockerode [https://github.com/apocas/dockero Active -+ (NodeJS) de](https://github.com/apocas/dock -+ erode) -+ Install via NPM: npm install -+ dockerode - --[https://github.com/kpelykh/docker-java](https://github.com/kpelykh/docker-java) -+ JavaScript docker.io [https://github.com/appersonlabs/d Active -+ (NodeJS) ocker.io](https://github.com/apper -+ sonlabs/docker.io) -+ Install via NPM: npm install -+ docker.io - --Active -+ JavaScript docker-js [https://github.com/dgoujard/docke Outdate -+ r-js](https://github.com/dgoujard/ d -+ docker-js) - --Erlang -+ JavaScript docker-cp [https://github.com/13W/docker-cp] Active -+ (Angular) (https://github.com/13W/docker-cp) -+ **WebUI** - --erldocker -+ JavaScript dockerui [https://github.com/crosbymichael/ Active -+ (Angular) dockerui](https://github.com/crosb -+ **WebUI** ymichael/dockerui) - --[https://github.com/proger/erldocker](https://github.com/proger/erldocker) -+ Java docker-java [https://github.com/kpelykh/docker Active -+ -java](https://github.com/kpelykh/ -+ docker-java) - --Active -+ Erlang erldocker [https://github.com/proger/erldock Active -+ er](https://github.com/proger/erld -+ ocker) - --Go -+ Go go-dockercli [https://github.com/fsouza/go-dock Active -+ ent erclient](https://github.com/fsouz -+ a/go-dockerclient) - --go-dockerclient -+ Go dockerclient [https://github.com/samalba/docker Active -+ client](https://github.com/samalba -+ /dockerclient) - --[https://github.com/fsouza/go-dockerclient](https://github.com/fsouza/go-dockerclient) -+ PHP Alvine [http://pear.alvine.io/](http://pe Active -+ ar.alvine.io/) -+ (alpha) - --Active -+ PHP Docker-PHP [http://stage1.github.io/docker-ph Active -+ p/](http://stage1.github.io/docker -+ -php/) - --PHP -+ Perl Net::Docker 
[https://metacpan.org/pod/Net::Doc Active -+ ker](https://metacpan.org/pod/Net: -+ :Docker) - --Alvine -+ Perl Eixo::Docker [https://github.com/alambike/eixo- Active -+ docker](https://github.com/alambik -+ e/eixo-docker) -+ ------------------------------------------------------------------------- - --[http://pear.alvine.io/](http://pear.alvine.io/) (alpha) - --Active -diff --git a/docs/sources/reference/commandline.md b/docs/sources/reference/commandline.md -index 6f7a779..b2fb7e0 100644 ---- a/docs/sources/reference/commandline.md -+++ b/docs/sources/reference/commandline.md -@@ -1,7 +1,7 @@ - - # Commands - --## Contents: -+Contents: - - - [Command Line Help](cli/) - - [Options](cli/#options) -diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md -index 9d825ce..3deac40 100644 ---- a/docs/sources/reference/run.md -+++ b/docs/sources/reference/run.md -@@ -2,7 +2,7 @@ page_title: Docker Run Reference - page_description: Configure containers at runtime - page_keywords: docker, run, configure, runtime - --# Docker Run Reference -+# [Docker Run Reference](#id2) - - **Docker runs processes in isolated containers**. When an operator - executes `docker run`, she starts a process with its -@@ -25,7 +25,7 @@ Table of Contents - - [Overriding `Dockerfile` Image - Defaults](#overriding-dockerfile-image-defaults) - --## General Form -+## [General Form](#id3) - - As you’ve seen in the [*Examples*](../../examples/#example-list), the - basic run command takes this form: -@@ -52,7 +52,7 @@ control over runtime behavior to the operator, allowing them to override - all defaults set by the developer during `docker build`{.docutils - .literal} and nearly all the defaults set by the Docker runtime itself. - --## Operator Exclusive Options -+## [Operator Exclusive Options](#id4) - - Only the operator (the person executing `docker run`{.docutils - .literal}) can set the following options. -@@ -60,19 +60,17 @@ Only the operator (the person executing `docker run`{.docutils - - [Detached vs Foreground](#detached-vs-foreground) - - [Detached (-d)](#detached-d) - - [Foreground](#foreground) -- - - [Container Identification](#container-identification) -- - [Name (-name)](#name-name) -+ - [Name (–name)](#name-name) - - [PID Equivalent](#pid-equivalent) -- - - [Network Settings](#network-settings) --- [Clean Up (-rm)](#clean-up-rm) -+- [Clean Up (–rm)](#clean-up-rm) - - [Runtime Constraints on CPU and - Memory](#runtime-constraints-on-cpu-and-memory) - - [Runtime Privilege and LXC - Configuration](#runtime-privilege-and-lxc-configuration) - --### Detached vs Foreground -+### [Detached vs Foreground](#id6) - - When starting a Docker container, you must first decide if you want to - run the container in the background in a “detached” mode or in the -@@ -80,7 +78,7 @@ default foreground mode: - - -d=false: Detached mode: Run container in the background, print new container id - --**Detached (-d)** -+#### [Detached (-d)](#id7) - - In detached mode (`-d=true` or just `-d`{.docutils - .literal}), all I/O should be done through network connections or shared -@@ -88,10 +86,10 @@ volumes because the container is no longer listening to the commandline - where you executed `docker run`. You can reattach to - a detached container with `docker` - [*attach*](../commandline/cli/#cli-attach). If you choose to run a --container in the detached mode, then you cannot use the `-rm`{.docutils -+container in the detached mode, then you cannot use the `--rm`{.docutils - .literal} option. 
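For example, a detached container might be started and reattached to like this (a minimal sketch; the `--name redis_test` name is only a placeholder, and `crosbymichael/redis` is the image used in other examples in these docs):

    # Run the container in the background; Docker prints the new container ID
    $ docker run -d --name redis_test crosbymichael/redis

    # Later, reattach to its output by ID or by name
    $ docker attach redis_test

Because `--rm` is incompatible with `-d`, a container started this way persists after it exits and has to be removed manually with `docker rm`.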
- --**Foreground** -+#### [Foreground](#id8) - - In foreground mode (the default when `-d` is not - specified), `docker run` can start the process in -@@ -100,10 +98,10 @@ output, and standard error. It can even pretend to be a TTY (this is - what most commandline executables expect) and pass along signals. All of - that is configurable: - -- -a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr`` -- -t=false : Allocate a pseudo-tty -- -sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) -- -i=false : Keep STDIN open even if not attached -+ -a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr`` -+ -t=false : Allocate a pseudo-tty -+ --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) -+ -i=false : Keep STDIN open even if not attached - - If you do not specify `-a` then Docker will [attach - everything -@@ -119,9 +117,9 @@ as well as persistent standard input (`stdin`), so - you’ll use `-i -t` together in most interactive - cases. - --### Container Identification -+### [Container Identification](#id9) - --**Name (-name)** -+#### [Name (–name)](#id10) - - The operator can identify a container in three ways: - -@@ -131,27 +129,27 @@ The operator can identify a container in three ways: - - Name (“evil\_ptolemy”) - - The UUID identifiers come from the Docker daemon, and if you do not --assign a name to the container with `-name` then the --daemon will also generate a random string name too. The name can become --a handy way to add meaning to a container since you can use this name --when defining -+assign a name to the container with `--name` then -+the daemon will also generate a random string name too. The name can -+become a handy way to add meaning to a container since you can use this -+name when defining - [*links*](../../use/working_with_links_names/#working-with-links-names) - (or any other place you need to identify a container). This works for - both background and foreground Docker containers. - --**PID Equivalent** -+#### [PID Equivalent](#id11) - - And finally, to help with automation, you can have Docker write the - container ID out to a file of your choosing. This is similar to how some - programs might write out their process ID to a file (you’ve seen them as - PID files): - -- -cidfile="": Write the container ID to the file -+ --cidfile="": Write the container ID to the file - --### Network Settings -+### [Network Settings](#id12) - - -n=true : Enable networking for this container -- -dns=[] : Set custom dns servers for the container -+ --dns=[] : Set custom dns servers for the container - - By default, all containers have networking enabled and they can make any - outgoing connections. The operator can completely disable networking -@@ -160,9 +158,9 @@ outgoing networking. In cases like this, you would perform I/O through - files or STDIN/STDOUT only. - - Your container will use the same DNS servers as the host by default, but --you can override this with `-dns`. -+you can override this with `--dns`. - --### Clean Up (-rm) -+### [Clean Up (–rm)](#id13) - - By default a container’s file system persists even after the container - exits. This makes debugging a lot easier (since you can inspect the -@@ -170,11 +168,11 @@ final state) and you retain all your data by default. But if you are - running short-term **foreground** processes, these container file - systems can really pile up. 
If instead you’d like Docker to - **automatically clean up the container and remove the file system when --the container exits**, you can add the `-rm` flag: -+the container exits**, you can add the `--rm` flag: - -- -rm=false: Automatically remove the container when it exits (incompatible with -d) -+ --rm=false: Automatically remove the container when it exits (incompatible with -d) - --### Runtime Constraints on CPU and Memory -+### [Runtime Constraints on CPU and Memory](#id14) - - The operator can also adjust the performance parameters of the - container: -@@ -193,10 +191,10 @@ the same priority and get the same proportion of CPU cycles, but you can - tell the kernel to give more shares of CPU time to one or more - containers when you start them via Docker. - --### Runtime Privilege and LXC Configuration -+### [Runtime Privilege and LXC Configuration](#id15) - -- -privileged=false: Give extended privileges to this container -- -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -+ --privileged=false: Give extended privileges to this container -+ --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" - - By default, Docker containers are “unprivileged” and cannot, for - example, run a Docker daemon inside a Docker container. This is because -@@ -206,23 +204,26 @@ by default a container is not allowed to access any devices, but a - and documentation on [cgroups - devices](https://www.kernel.org/doc/Documentation/cgroups/devices.txt)). - --When the operator executes `docker run -privileged`, --Docker will enable to access to all devices on the host as well as set --some configuration in AppArmor to allow the container nearly all the --same access to the host as processes running outside containers on the --host. Additional information about running with `-privileged`{.docutils --.literal} is available on the [Docker -+When the operator executes `docker run --privileged`{.docutils -+.literal}, Docker will enable to access to all devices on the host as -+well as set some configuration in AppArmor to allow the container nearly -+all the same access to the host as processes running outside containers -+on the host. Additional information about running with -+`--privileged` is available on the [Docker - Blog](http://blog.docker.io/2013/09/docker-can-now-run-within-docker/). - --An operator can also specify LXC options using one or more --`-lxc-conf` parameters. These can be new parameters -+If the Docker daemon was started using the `lxc` -+exec-driver (`docker -d --exec-driver=lxc`) then the -+operator can also specify LXC options using one or more -+`--lxc-conf` parameters. These can be new parameters - or override existing parameters from the - [lxc-template.go](https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go). - Note that in the future, a given host’s Docker daemon may not use LXC, - so this is an implementation-specific configuration meant for operators - already familiar with using LXC directly. - --## Overriding `Dockerfile` Image Defaults -+## [Overriding `Dockerfile` Image Defaults](#id5) -+ - When a developer builds an image from a - [*Dockerfile*](../builder/#dockerbuilder) or when she commits it, the - developer can set a number of default parameters that take effect when -@@ -244,7 +245,7 @@ how the operator can override that setting. 
- - [USER](#user) - - [WORKDIR](#workdir) - --### CMD (Default Command or Options) -+### [CMD (Default Command or Options)](#id16) - - Recall the optional `COMMAND` in the Docker - commandline: -@@ -262,9 +263,9 @@ If the image also specifies an `ENTRYPOINT` then the - `CMD` or `COMMAND`{.docutils .literal} get appended - as arguments to the `ENTRYPOINT`. - --### ENTRYPOINT (Default Command to Execute at Runtime -+### [ENTRYPOINT (Default Command to Execute at Runtime](#id17) - -- -entrypoint="": Overwrite the default entrypoint set by the image -+ --entrypoint="": Overwrite the default entrypoint set by the image - - The ENTRYPOINT of an image is similar to a `COMMAND` - because it specifies what executable to run when the container starts, -@@ -280,14 +281,14 @@ the new `ENTRYPOINT`. Here is an example of how to - run a shell in a container that has been set up to automatically run - something else (like `/usr/bin/redis-server`): - -- docker run -i -t -entrypoint /bin/bash example/redis -+ docker run -i -t --entrypoint /bin/bash example/redis - - or two examples of how to pass more parameters to that ENTRYPOINT: - -- docker run -i -t -entrypoint /bin/bash example/redis -c ls -l -- docker run -i -t -entrypoint /usr/bin/redis-cli example/redis --help -+ docker run -i -t --entrypoint /bin/bash example/redis -c ls -l -+ docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help - --### EXPOSE (Incoming Ports) -+### [EXPOSE (Incoming Ports)](#id18) - - The `Dockerfile` doesn’t give much control over - networking, only providing the `EXPOSE` instruction -@@ -295,17 +296,17 @@ to give a hint to the operator about what incoming ports might provide - services. The following options work with or override the - `Dockerfile`‘s exposed defaults: - -- -expose=[]: Expose a port from the container -+ --expose=[]: Expose a port from the container - without publishing it to your host -- -P=false : Publish all exposed ports to the host interfaces -- -p=[] : Publish a container's port to the host (format: -- ip:hostPort:containerPort | ip::containerPort | -- hostPort:containerPort) -- (use 'docker port' to see the actual mapping) -- -link="" : Add link to another container (name:alias) -+ -P=false : Publish all exposed ports to the host interfaces -+ -p=[] : Publish a container's port to the host (format: -+ ip:hostPort:containerPort | ip::containerPort | -+ hostPort:containerPort) -+ (use 'docker port' to see the actual mapping) -+ --link="" : Add link to another container (name:alias) - - As mentioned previously, `EXPOSE` (and --`-expose`) make a port available **in** a container -+`--expose`) make a port available **in** a container - for incoming connections. The port number on the inside of the container - (where the service listens) does not need to be the same number as the - port exposed on the outside of the container (where clients connect), so -@@ -315,11 +316,11 @@ inside the container you might have an HTTP service listening on port 80 - might be 42800. - - To help a new client container reach the server container’s internal --port operator `-expose`‘d by the operator or -+port operator `--expose`‘d by the operator or - `EXPOSE`‘d by the developer, the operator has three - choices: start the server container with `-P` or - `-p,` or start the client container with --`-link`. -+`--link`. 
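As a rough sketch of those three choices (the `example/webapp` image and the container names are placeholders, not images shipped with Docker):

    # 1. Publish every EXPOSEd port to a host port chosen by Docker
    $ docker run -d -P --name web_all example/webapp

    # 2. Publish one container port to an explicit host port
    $ docker run -d -p 8080:80 --name web_one example/webapp

    # 3. Publish nothing; let a client container link to the server instead
    $ docker run -d --expose 80 --name web_server example/webapp
    $ docker run -i -t --rm --link web_server:web ubuntu bash

In the first two cases, `docker port` shows the resulting host mapping, as described below.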
- - If the operator uses `-P` or `-p`{.docutils - .literal} then Docker will make the exposed port accessible on the host -@@ -327,20 +328,20 @@ and the ports will be available to any client that can reach the host. - To find the map between the host ports and the exposed ports, use - `docker port`) - --If the operator uses `-link` when starting the new -+If the operator uses `--link` when starting the new - client container, then the client container can access the exposed port - via a private networking interface. Docker will set some environment - variables in the client container to help indicate which interface and - port to use. - --### ENV (Environment Variables) -+### [ENV (Environment Variables)](#id19) - - The operator can **set any environment variable** in the container by - using one or more `-e` flags, even overriding those - already defined by the developer with a Dockefile `ENV`{.docutils - .literal}: - -- $ docker run -e "deep=purple" -rm ubuntu /bin/bash -c export -+ $ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export - declare -x HOME="/" - declare -x HOSTNAME="85bc26a0e200" - declare -x OLDPWD -@@ -353,13 +354,13 @@ already defined by the developer with a Dockefile `ENV`{.docutils - Similarly the operator can set the **hostname** with `-h`{.docutils - .literal}. - --`-link name:alias` also sets environment variables, -+`--link name:alias` also sets environment variables, - using the *alias* string to define environment variables within the - container that give the IP and PORT information for connecting to the - service container. Let’s imagine we have a container running Redis: - - # Start the service container, named redis-name -- $ docker run -d -name redis-name dockerfiles/redis -+ $ docker run -d --name redis-name dockerfiles/redis - 4241164edf6f5aca5b0e9e4c9eccd899b0b8080c64c0cd26efe02166c73208f3 - - # The redis-name container exposed port 6379 -@@ -372,10 +373,10 @@ service container. Let’s imagine we have a container running Redis: - 2014/01/25 00:55:38 Error: No public port '6379' published for 4241164edf6f - - Yet we can get information about the Redis container’s exposed ports --with `-link`. Choose an alias that will form a valid --environment variable! -+with `--link`. Choose an alias that will form a -+valid environment variable! - -- $ docker run -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c export -+ $ docker run --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c export - declare -x HOME="/" - declare -x HOSTNAME="acda7f7b1cdc" - declare -x OLDPWD -@@ -393,14 +394,14 @@ environment variable! - And we can use that information to connect from another container as a - client: - -- $ docker run -i -t -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT' -+ $ docker run -i -t --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT' - 172.17.0.32:6379> - --### VOLUME (Shared Filesystems) -+### [VOLUME (Shared Filesystems)](#id20) - - -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. - If "container-dir" is missing, then docker creates a new volume. 
-- -volumes-from="": Mount all volumes from the given container(s) -+ --volumes-from="": Mount all volumes from the given container(s) - - The volumes commands are complex enough to have their own documentation - in section [*Share Directories via -@@ -409,7 +410,7 @@ define one or more `VOLUME`s associated with an - image, but only the operator can give access from one container to - another (or from a container to a volume mounted on the host). - --### USER -+### [USER](#id21) - - The default user within a container is `root` (id = - 0), but if the developer created additional users, those are accessible -@@ -419,7 +420,7 @@ override it - - -u="": Username or UID - --### WORKDIR -+### [WORKDIR](#id22) - - The default working directory for running binaries within a container is - the root directory (`/`), but the developer can set -diff --git a/docs/sources/search.md b/docs/sources/search.md -index 0e2e13f..0296d50 100644 ---- a/docs/sources/search.md -+++ b/docs/sources/search.md -@@ -1,8 +1,7 @@ --# Search - --*Please activate JavaScript to enable the search functionality.* -+# Search {#search-documentation} - --## How To Search -+Please activate JavaScript to enable the search functionality. - - From here you can search these documents. Enter your search words into - the box below and click "search". Note that the search function will -diff --git a/docs/sources/terms.md b/docs/sources/terms.md -index 59579d9..5152876 100644 ---- a/docs/sources/terms.md -+++ b/docs/sources/terms.md -@@ -1,13 +1,14 @@ -+ - # Glossary - --*Definitions of terms used in Docker documentation.* -+Definitions of terms used in Docker documentation. - --## Contents: -+Contents: - --- [File System](filesystem/) --- [Layers](layer/) --- [Image](image/) --- [Container](container/) --- [Registry](registry/) --- [Repository](repository/) -+- [File System](filesystem/) -+- [Layers](layer/) -+- [Image](image/) -+- [Container](container/) -+- [Registry](registry/) -+- [Repository](repository/) - -diff --git a/docs/sources/terms/container.md b/docs/sources/terms/container.md -index bc493d4..6fbf952 100644 ---- a/docs/sources/terms/container.md -+++ b/docs/sources/terms/container.md -@@ -4,8 +4,6 @@ page_keywords: containers, lxc, concepts, explanation, image, container - - # Container - --## Introduction -- - ![](../../_images/docker-filesystems-busyboxrw.png) - - Once you start a process in Docker from an -diff --git a/docs/sources/terms/filesystem.md b/docs/sources/terms/filesystem.md -index 2038d00..8fbd977 100644 ---- a/docs/sources/terms/filesystem.md -+++ b/docs/sources/terms/filesystem.md -@@ -4,8 +4,6 @@ page_keywords: containers, files, linux - - # File System - --## Introduction -- - ![](../../_images/docker-filesystems-generic.png) - - In order for a Linux system to run, it typically needs two [file -diff --git a/docs/sources/terms/image.md b/docs/sources/terms/image.md -index 721d4c9..98914dd 100644 ---- a/docs/sources/terms/image.md -+++ b/docs/sources/terms/image.md -@@ -4,8 +4,6 @@ page_keywords: containers, lxc, concepts, explanation, image, container - - # Image - --## Introduction -- - ![](../../_images/docker-filesystems-debian.png) - - In Docker terminology, a read-only [*Layer*](../layer/#layer-def) is -diff --git a/docs/sources/terms/layer.md b/docs/sources/terms/layer.md -index 7665467..6949d5c 100644 ---- a/docs/sources/terms/layer.md -+++ b/docs/sources/terms/layer.md -@@ -4,8 +4,6 @@ page_keywords: containers, lxc, concepts, explanation, image, container - - # Layers - --## Introduction -- - In 
a traditional Linux boot, the kernel first mounts the root [*File - System*](../filesystem/#filesystem-def) as read-only, checks its - integrity, and then switches the whole rootfs volume to read-write mode. -diff --git a/docs/sources/terms/registry.md b/docs/sources/terms/registry.md -index 0d5af2c..53c0a24 100644 ---- a/docs/sources/terms/registry.md -+++ b/docs/sources/terms/registry.md -@@ -4,8 +4,6 @@ page_keywords: containers, lxc, concepts, explanation, image, repository, contai - - # Registry - --## Introduction -- - A Registry is a hosted service containing - [*repositories*](../repository/#repository-def) of - [*images*](../image/#image-def) which responds to the Registry API. -@@ -14,7 +12,5 @@ The default registry can be accessed using a browser at - [http://images.docker.io](http://images.docker.io) or using the - `sudo docker search` command. - --## Further Reading -- - For more information see [*Working with - Repositories*](../../use/workingwithrepository/#working-with-the-repository) -diff --git a/docs/sources/terms/repository.md b/docs/sources/terms/repository.md -index e3332e4..8868440 100644 ---- a/docs/sources/terms/repository.md -+++ b/docs/sources/terms/repository.md -@@ -4,8 +4,6 @@ page_keywords: containers, lxc, concepts, explanation, image, repository, contai - - # Repository - --## Introduction -- - A repository is a set of images either on your local Docker server, or - shared, by pushing it to a [*Registry*](../registry/#registry-def) - server. -diff --git a/docs/sources/toctree.md b/docs/sources/toctree.md -index 259a231..b268e90 100644 ---- a/docs/sources/toctree.md -+++ b/docs/sources/toctree.md -@@ -1,14 +1,18 @@ -+page_title: Documentation -+page_description: -- todo: change me -+page_keywords: todo, docker, documentation, installation, usage, examples, contributing, faq, command line, concepts -+ - # Documentation - --## This documentation has the following resources: -- --- [Introduction](../) --- [Installation](../installation/) --- [Use](../use/) --- [Examples](../examples/) --- [Reference Manual](../reference/) --- [Contributing](../contributing/) --- [Glossary](../terms/) --- [Articles](../articles/) --- [FAQ](../faq/) -+This documentation has the following resources: -+ -+- [Introduction](../) -+- [Installation](../installation/) -+- [Use](../use/) -+- [Examples](../examples/) -+- [Reference Manual](../reference/) -+- [Contributing](../contributing/) -+- [Glossary](../terms/) -+- [Articles](../articles/) -+- [FAQ](../faq/) - -diff --git a/docs/sources/use.md b/docs/sources/use.md -index ce4a510..00077a5 100644 ---- a/docs/sources/use.md -+++ b/docs/sources/use.md -@@ -1,13 +1,16 @@ -+ - # Use - --## Contents: -- --- [First steps with Docker](basics/) --- [Share Images via Repositories](workingwithrepository/) --- [Redirect Ports](port_redirection/) --- [Configure Networking](networking/) --- [Automatically Start Containers](host_integration/) --- [Share Directories via Volumes](working_with_volumes/) --- [Link Containers](working_with_links_names/) --- [Link via an Ambassador Container](ambassador_pattern_linking/) --- [Using Puppet](puppet/) -\ No newline at end of file -+Contents: -+ -+- [First steps with Docker](basics/) -+- [Share Images via Repositories](workingwithrepository/) -+- [Redirect Ports](port_redirection/) -+- [Configure Networking](networking/) -+- [Automatically Start Containers](host_integration/) -+- [Share Directories via Volumes](working_with_volumes/) -+- [Link Containers](working_with_links_names/) -+- [Link via an 
Ambassador Container](ambassador_pattern_linking/) -+- [Using Chef](chef/) -+- [Using Puppet](puppet/) -+ -diff --git a/docs/sources/use/ambassador_pattern_linking.md b/docs/sources/use/ambassador_pattern_linking.md -index b5df7f8..f7704a5 100644 ---- a/docs/sources/use/ambassador_pattern_linking.md -+++ b/docs/sources/use/ambassador_pattern_linking.md -@@ -4,8 +4,6 @@ page_keywords: Examples, Usage, links, docker, documentation, examples, names, n - - # Link via an Ambassador Container - --## Introduction -- - Rather than hardcoding network links between a service consumer and - provider, Docker encourages service portability. - -@@ -38,24 +36,24 @@ link wiring is controlled entirely from the `docker run`{.docutils - - Start actual redis server on one Docker host - -- big-server $ docker run -d -name redis crosbymichael/redis -+ big-server $ docker run -d --name redis crosbymichael/redis - - Then add an ambassador linked to the redis server, mapping a port to the - outside world - -- big-server $ docker run -d -link redis:redis -name redis_ambassador -p 6379:6379 svendowideit/ambassador -+ big-server $ docker run -d --link redis:redis --name redis_ambassador -p 6379:6379 svendowideit/ambassador - - On the other host, you can set up another ambassador setting environment - variables for each remote port we want to proxy to the - `big-server` - -- client-server $ docker run -d -name redis_ambassador -expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador -+ client-server $ docker run -d --name redis_ambassador --expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador - - Then on the `client-server` host, you can use a - redis client container to talk to the remote redis server, just by - linking to the local redis ambassador. 
- -- client-server $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli -+ client-server $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli - redis 172.17.0.160:6379> ping - PONG - -@@ -68,19 +66,19 @@ The following example shows what the `svendowideit/ambassador`{.docutils - On the docker host (192.168.1.52) that redis will run on: - - # start actual redis server -- $ docker run -d -name redis crosbymichael/redis -+ $ docker run -d --name redis crosbymichael/redis - - # get a redis-cli container for connection testing - $ docker pull relateiq/redis-cli - - # test the redis server by talking to it directly -- $ docker run -t -i -rm -link redis:redis relateiq/redis-cli -+ $ docker run -t -i --rm --link redis:redis relateiq/redis-cli - redis 172.17.0.136:6379> ping - PONG - ^D - - # add redis ambassador -- $ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 busybox sh -+ $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 busybox sh - - in the redis\_ambassador container, you can see the linked redis - containers’s env -@@ -104,7 +102,7 @@ to the world (via the -p 6379:6379 port mapping) - - $ docker rm redis_ambassador - $ sudo ./contrib/mkimage-unittest.sh -- $ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 docker-ut sh -+ $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 docker-ut sh - - $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379 - -@@ -113,14 +111,14 @@ then ping the redis server via the ambassador - Now goto a different server - - $ sudo ./contrib/mkimage-unittest.sh -- $ docker run -t -i -expose 6379 -name redis_ambassador docker-ut sh -+ $ docker run -t -i --expose 6379 --name redis_ambassador docker-ut sh - - $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379 - - and get the redis-cli image so we can talk over the ambassador bridge - - $ docker pull relateiq/redis-cli -- $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli -+ $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli - redis 172.17.0.160:6379> ping - PONG - -@@ -133,7 +131,7 @@ out the (possibly multiple) link environment variables to set up the - port forwarding. On the remote host, you need to set the variable using - the `-e` command line option. - --`-expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`{.docutils -+`--expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`{.docutils - .literal} will forward the local `1234` port to the - remote IP and port - in this case `192.168.1.52:6379`{.docutils - .literal}. -@@ -146,12 +144,12 @@ remote IP and port - in this case `192.168.1.52:6379`{.docutils - # docker build -t SvenDowideit/ambassador . 
- # docker tag SvenDowideit/ambassador ambassador - # then to run it (on the host that has the real backend on it) -- # docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 ambassador -+ # docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 ambassador - # on the remote host, you can set up another ambassador -- # docker run -t -i -name redis_ambassador -expose 6379 sh -+ # docker run -t -i --name redis_ambassador --expose 6379 sh - - FROM docker-ut - MAINTAINER SvenDowideit@home.org.au - - -- CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top -\ No newline at end of file -+ CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top -diff --git a/docs/sources/use/basics.md b/docs/sources/use/basics.md -index 1b10335..0abc8e7 100644 ---- a/docs/sources/use/basics.md -+++ b/docs/sources/use/basics.md -@@ -37,7 +37,10 @@ hash `539c0211cd76: Download complete` which is the - short form of the image ID. These short image IDs are the first 12 - characters of the full image ID - which can be found using - `docker inspect` or --`docker images -notrunc=true` -+`docker images --no-trunc=true` -+ -+**If you’re using OS X** then you shouldn’t use `sudo`{.docutils -+.literal} - - ## Running an interactive shell - -diff --git a/docs/sources/use/host_integration.md b/docs/sources/use/host_integration.md -index 50eae8b..a7dba9b 100644 ---- a/docs/sources/use/host_integration.md -+++ b/docs/sources/use/host_integration.md -@@ -5,7 +5,8 @@ page_keywords: systemd, upstart, supervisor, docker, documentation, host integra - # Automatically Start Containers - - You can use your Docker containers with process managers like --`upstart`, `systemd`{.docutils .literal} and `supervisor`. -+`upstart`, `systemd`{.docutils .literal} and -+`supervisor`. - - ## Introduction - -@@ -15,21 +16,22 @@ docker will not automatically restart your containers when the host is - restarted. - - When you have finished setting up your image and are happy with your --running container, you may want to use a process manager to manage it. -+running container, you can then attach a process manager to manage it. - When your run `docker start -a` docker will --automatically attach to the process and forward all signals so that the --process manager can detect when a container stops and correctly restart --it. -+automatically attach to the running container, or start it if needed and -+forward all signals so that the process manager can detect when a -+container stops and correctly restart it. - - Here are a few sample scripts for systemd and upstart to integrate with - docker. - - ## Sample Upstart Script - --In this example we’ve already created a container to run Redis with an --id of 0a7e070b698b. To create an upstart script for our container, we --create a file named `/etc/init/redis.conf` and place --the following into it: -+In this example we’ve already created a container to run Redis with -+`--name redis_server`. To create an upstart script -+for our container, we create a file named -+`/etc/init/redis.conf` and place the following into -+it: - - description "Redis container" - author "Me" -@@ -42,7 +44,7 @@ the following into it: - while [ ! 
-e $FILE ] ; do - inotifywait -t 2 -e create $(dirname $FILE) - done -- /usr/bin/docker start -a 0a7e070b698b -+ /usr/bin/docker start -a redis_server - end script - - Next, we have to configure docker so that it’s run with the option -@@ -59,8 +61,8 @@ Next, we have to configure docker so that it’s run with the option - - [Service] - Restart=always -- ExecStart=/usr/bin/docker start -a 0a7e070b698b -- ExecStop=/usr/bin/docker stop -t 2 0a7e070b698b -+ ExecStart=/usr/bin/docker start -a redis_server -+ ExecStop=/usr/bin/docker stop -t 2 redis_server - - [Install] - WantedBy=local.target -diff --git a/docs/sources/use/networking.md b/docs/sources/use/networking.md -index e4cc5c5..56a9885 100644 ---- a/docs/sources/use/networking.md -+++ b/docs/sources/use/networking.md -@@ -4,16 +4,15 @@ page_keywords: network, networking, bridge, docker, documentation - - # Configure Networking - --## Introduction -- - Docker uses Linux bridge capabilities to provide network connectivity to - containers. The `docker0` bridge interface is - managed by Docker for this purpose. When the Docker daemon starts it : - --- creates the `docker0` bridge if not present --- searches for an IP address range which doesn’t overlap with an existing route --- picks an IP in the selected range --- assigns this IP to the `docker0` bridge -+- creates the `docker0` bridge if not present -+- searches for an IP address range which doesn’t overlap with an -+ existing route -+- picks an IP in the selected range -+- assigns this IP to the `docker0` bridge - - - -@@ -113,9 +112,9 @@ The value of the Docker daemon’s `icc` parameter - determines whether containers can communicate with each other over the - bridge network. - --- The default, `-icc=true` allows containers to -+- The default, `--icc=true` allows containers to - communicate with each other. --- `-icc=false` means containers are isolated from -+- `--icc=false` means containers are isolated from - each other. - - Docker uses `iptables` under the hood to either -@@ -137,6 +136,6 @@ ip link command) and the namespaces infrastructure. - - ## I want more - --Jérôme Petazzoni has create `pipework` to connect -+Jérôme Petazzoni has created `pipework` to connect - together containers in arbitrarily complex scenarios : - [https://github.com/jpetazzo/pipework](https://github.com/jpetazzo/pipework) -diff --git a/docs/sources/use/port_redirection.md b/docs/sources/use/port_redirection.md -index 6970d0d..1c1b676 100644 ---- a/docs/sources/use/port_redirection.md -+++ b/docs/sources/use/port_redirection.md -@@ -4,8 +4,6 @@ page_keywords: Usage, basic port, docker, documentation, examples - - # Redirect Ports - --## Introduction -- - Interacting with a service is commonly done through a connection to a - port. When this service runs inside a container, one can connect to the - port after finding the IP address of the container as follows: -@@ -74,7 +72,7 @@ port on the host machine bound to a given container port. It is useful - when using dynamically allocated ports: - - # Bind to a dynamically allocated port -- docker run -p 127.0.0.1::8080 -name dyn-bound -+ docker run -p 127.0.0.1::8080 --name dyn-bound - - # Lookup the actual port - docker port dyn-bound 8080 -@@ -105,18 +103,18 @@ started. - - Here is a full example. On `server`, the port of - interest is exposed. 
The exposure is done either through the --`-expose` parameter to the `docker run`{.docutils -+`--expose` parameter to the `docker run`{.docutils - .literal} command, or the `EXPOSE` build command in - a Dockerfile: - - # Expose port 80 -- docker run -expose 80 -name server -+ docker run --expose 80 --name server - - The `client` then links to the `server`{.docutils - .literal}: - - # Link -- docker run -name client -link server:linked-server -+ docker run --name client --link server:linked-server - - `client` locally refers to `server`{.docutils - .literal} as `linked-server`. The following -@@ -137,4 +135,4 @@ port 80 of `server` and that `server`{.docutils - .literal} is accessible at the IP address 172.17.0.8 - - Note: Using the `-p` parameter also exposes the --port.. -+port. -diff --git a/docs/sources/use/puppet.md b/docs/sources/use/puppet.md -index 55f16dd..b00346c 100644 ---- a/docs/sources/use/puppet.md -+++ b/docs/sources/use/puppet.md -@@ -4,10 +4,12 @@ page_keywords: puppet, installation, usage, docker, documentation - - # Using Puppet - --> *Note:* Please note this is a community contributed installation path. The only --> ‘official’ installation is using the --> [*Ubuntu*](../../installation/ubuntulinux/#ubuntu-linux) installation --> path. This version may sometimes be out of date. -+Note -+ -+Please note this is a community contributed installation path. The only -+‘official’ installation is using the -+[*Ubuntu*](../../installation/ubuntulinux/#ubuntu-linux) installation -+path. This version may sometimes be out of date. - - ## Requirements - -diff --git a/docs/sources/use/working_with_links_names.md b/docs/sources/use/working_with_links_names.md -index 3a12284..b41be0d 100644 ---- a/docs/sources/use/working_with_links_names.md -+++ b/docs/sources/use/working_with_links_names.md -@@ -4,8 +4,6 @@ page_keywords: Examples, Usage, links, linking, docker, documentation, examples, - - # Link Containers - --## Introduction -- - From version 0.6.5 you are now able to `name` a - container and `link` it to another container by - referring to its name. This will create a parent -\> child relationship -@@ -15,12 +13,13 @@ where the parent container can see selected information about its child. - - New in version v0.6.5. - --You can now name your container by using the `-name` --flag. If no name is provided, Docker will automatically generate a name. --You can see this name using the `docker ps` command. -+You can now name your container by using the `--name`{.docutils -+.literal} flag. If no name is provided, Docker will automatically -+generate a name. You can see this name using the `docker ps`{.docutils -+.literal} command. - -- # format is "sudo docker run -name " -- $ sudo docker run -name test ubuntu /bin/bash -+ # format is "sudo docker run --name " -+ $ sudo docker run --name test ubuntu /bin/bash - - # the flag "-a" Show all containers. Only running containers are shown by default. - $ sudo docker ps -a -@@ -32,9 +31,9 @@ You can see this name using the `docker ps` command. - New in version v0.6.5. - - Links allow containers to discover and securely communicate with each --other by using the flag `-link name:alias`. -+other by using the flag `--link name:alias`. - Inter-container communication can be disabled with the daemon flag --`-icc=false`. With this flag set to -+`--icc=false`. With this flag set to - `false`, Container A cannot access Container B - unless explicitly allowed via a link. This is a huge win for securing - your containers. 
When two containers are linked together Docker creates -@@ -52,9 +51,9 @@ communication is set to false. - For example, there is an image called `crosbymichael/redis`{.docutils - .literal} that exposes the port 6379 and starts the Redis server. Let’s - name the container as `redis` based on that image --and run it as daemon. -+and run it as a daemon. - -- $ sudo docker run -d -name redis crosbymichael/redis -+ $ sudo docker run -d --name redis crosbymichael/redis - - We can issue all the commands that you would expect using the name - `redis`; start, stop, attach, using the name for our -@@ -67,9 +66,9 @@ our Redis server we did not use the `-p` flag to - publish the Redis port to the host system. Redis exposed port 6379 and - this is all we need to establish a link. - -- $ sudo docker run -t -i -link redis:db -name webapp ubuntu bash -+ $ sudo docker run -t -i --link redis:db --name webapp ubuntu bash - --When you specified `-link redis:db` you are telling -+When you specified `--link redis:db` you are telling - Docker to link the container named `redis` into this - new container with the alias `db`. Environment - variables are prefixed with the alias so that the parent container can -@@ -101,8 +100,18 @@ Accessing the network information along with the environment of the - child container allows us to easily connect to the Redis service on the - specific IP and port in the environment. - -+Note -+ -+These Environment variables are only set for the first process in the -+container. Similarly, some daemons (such as `sshd`) -+will scrub them when spawning shells for connection. -+ -+You can work around this by storing the initial `env`{.docutils -+.literal} in a file, or looking at `/proc/1/environ`{.docutils -+.literal}. -+ - Running `docker ps` shows the 2 containers, and the --`webapp/db` alias name for the redis container. -+`webapp/db` alias name for the Redis container. - - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -diff --git a/docs/sources/use/working_with_volumes.md b/docs/sources/use/working_with_volumes.md -index 6cf57ee..542c715 100644 ---- a/docs/sources/use/working_with_volumes.md -+++ b/docs/sources/use/working_with_volumes.md -@@ -4,27 +4,24 @@ page_keywords: Examples, Usage, volume, docker, documentation, examples - - # Share Directories via Volumes - --## Introduction -- - A *data volume* is a specially-designated directory within one or more - containers that bypasses the [*Union File - System*](../../terms/layer/#ufs-def) to provide several useful features - for persistent or shared data: - --- **Data volumes can be shared and reused between containers:** -- This is the feature that makes data volumes so powerful. You can -- use it for anything from hot database upgrades to custom backup or -- replication tools. See the example below. --- **Changes to a data volume are made directly:** -- Without the overhead of a copy-on-write mechanism. This is good for -- very large files. --- **Changes to a data volume will not be included at the next commit:** -- Because they are not recorded as regular filesystem changes in the -- top layer of the [*Union File System*](../../terms/layer/#ufs-def) --- **Volumes persist until no containers use them:** -- As they are a reference counted resource. The container does not need to be -- running to share its volumes, but running it can help protect it -- against accidental removal via `docker rm`. 
-+- **Data volumes can be shared and reused between containers.** This -+ is the feature that makes data volumes so powerful. You can use it -+ for anything from hot database upgrades to custom backup or -+ replication tools. See the example below. -+- **Changes to a data volume are made directly**, without the overhead -+ of a copy-on-write mechanism. This is good for very large files. -+- **Changes to a data volume will not be included at the next commit** -+ because they are not recorded as regular filesystem changes in the -+ top layer of the [*Union File System*](../../terms/layer/#ufs-def) -+- **Volumes persist until no containers use them** as they are a -+ reference counted resource. The container does not need to be -+ running to share its volumes, but running it can help protect it -+ against accidental removal via `docker rm`. - - Each container can have zero or more data volumes. - -@@ -43,7 +40,7 @@ container with two new volumes: - This command will create the new container with two new volumes that - exits instantly (`true` is pretty much the smallest, - simplest program that you can run). Once created you can mount its --volumes in any other container using the `-volumes-from`{.docutils -+volumes in any other container using the `--volumes-from`{.docutils - .literal} option; irrespective of whether the container is running or - not. - -@@ -51,7 +48,7 @@ Or, you can use the VOLUME instruction in a Dockerfile to add one or - more new volumes to any container created from that image: - - # BUILD-USING: docker build -t data . -- # RUN-USING: docker run -name DATA data -+ # RUN-USING: docker run --name DATA data - FROM busybox - VOLUME ["/var/volume1", "/var/volume2"] - CMD ["/bin/true"] -@@ -66,20 +63,20 @@ it. - Create a named container with volumes to share (`/var/volume1`{.docutils - .literal} and `/var/volume2`): - -- $ docker run -v /var/volume1 -v /var/volume2 -name DATA busybox true -+ $ docker run -v /var/volume1 -v /var/volume2 --name DATA busybox true - - Then mount those data volumes into your application containers: - -- $ docker run -t -i -rm -volumes-from DATA -name client1 ubuntu bash -+ $ docker run -t -i --rm --volumes-from DATA --name client1 ubuntu bash - --You can use multiple `-volumes-from` parameters to -+You can use multiple `--volumes-from` parameters to - bring together multiple data volumes from multiple containers. - - Interestingly, you can mount the volumes that came from the - `DATA` container in yet another container via the - `client1` middleman container: - -- $ docker run -t -i -rm -volumes-from client1 -name client2 ubuntu bash -+ $ docker run -t -i --rm --volumes-from client1 --name client2 ubuntu bash - - This allows you to abstract the actual data source from users of that - data, similar to -@@ -136,9 +133,9 @@ because they are external to images. Instead you can use - `--volumes-from` to start a new container that can - access the data-container’s volume. 
For example: - -- $ sudo docker run -rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data -+ $ sudo docker run --rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data - --- `-rm` - remove the container when it exits -+- `--rm` - remove the container when it exits - - `--volumes-from DATA` - attach to the volumes - shared by the `DATA` container - - `-v $(pwd):/backup` - bind mount the current -@@ -153,13 +150,13 @@ Then to restore to the same container, or another that you’ve made - elsewhere: - - # create a new data container -- $ sudo docker run -v /data -name DATA2 busybox true -+ $ sudo docker run -v /data --name DATA2 busybox true - # untar the backup files into the new container's data volume -- $ sudo docker run -rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar -+ $ sudo docker run --rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar - data/ - data/sven.txt - # compare to the original container -- $ sudo docker run -rm --volumes-from DATA -v `pwd`:/backup busybox ls /data -+ $ sudo docker run --rm --volumes-from DATA -v `pwd`:/backup busybox ls /data - sven.txt - - You can use the basic techniques above to automate backup, migration and -diff --git a/docs/sources/use/workingwithrepository.md b/docs/sources/use/workingwithrepository.md -index bd0e274..1cfec63 100644 ---- a/docs/sources/use/workingwithrepository.md -+++ b/docs/sources/use/workingwithrepository.md -@@ -4,8 +4,6 @@ page_keywords: repo, repositories, usage, pull image, push image, image, documen - - # Share Images via Repositories - --## Introduction -- - A *repository* is a shareable collection of tagged - [*images*](../../terms/image/#image-def) that together create the file - systems for containers. The repository’s name is a label that indicates -@@ -27,14 +25,12 @@ repositories. You can host your own Registry too! Docker acts as a - client for these services via `docker search, pull, login`{.docutils - .literal} and `push`. - --## Repositories -- --### Local Repositories -+## Local Repositories - - Docker images which have been created and labeled on your local Docker - server need to be pushed to a Public or Private registry to be shared. - --### Public Repositories -+## Public Repositories - - There are two types of public repositories: *top-level* repositories - which are controlled by the Docker team, and *user* repositories created -@@ -67,7 +63,7 @@ user name or description: - - Search the docker index for images - -- -notrunc=false: Don't truncate output -+ --no-trunc=false: Don't truncate output - $ sudo docker search centos - Found 25 results matching your query ("centos") - NAME DESCRIPTION -@@ -204,7 +200,7 @@ See also - [Docker Blog: How to use your own - registry](http://blog.docker.io/2013/07/how-to-use-your-own-registry/) - --## Authentication File -+## Authentication file - - The authentication is stored in a json file, `.dockercfg`{.docutils - .literal} located in your home directory. 
It supports multiple registry diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index 6f41142a84..0000000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -Sphinx==1.2.1 -sphinxcontrib-httpdomain==1.2.0 diff --git a/docs/sources/articles/baseimages.rst b/docs/sources/articles/baseimages.rst deleted file mode 100644 index 61c8f7d9c5..0000000000 --- a/docs/sources/articles/baseimages.rst +++ /dev/null @@ -1,65 +0,0 @@ -:title: Create a Base Image -:description: How to create base images -:keywords: Examples, Usage, base image, docker, documentation, examples - -.. _base_image_creation: - -Create a Base Image -=================== - -So you want to create your own :ref:`base_image_def`? Great! - -The specific process will depend heavily on the Linux distribution you -want to package. We have some examples below, and you are encouraged -to submit pull requests to contribute new ones. - -Create a full image using tar -............................. - -In general, you'll want to start with a working machine that is -running the distribution you'd like to package as a base image, though -that is not required for some tools like Debian's `Debootstrap -`_, which you can also use to -build Ubuntu images. - -It can be as simple as this to create an Ubuntu base image:: - - $ sudo debootstrap raring raring > /dev/null - $ sudo tar -C raring -c . | sudo docker import - raring - a29c15f1bf7a - $ sudo docker run raring cat /etc/lsb-release - DISTRIB_ID=Ubuntu - DISTRIB_RELEASE=13.04 - DISTRIB_CODENAME=raring - DISTRIB_DESCRIPTION="Ubuntu 13.04" - -There are more example scripts for creating base images in the -Docker GitHub Repo: - -* `BusyBox `_ -* CentOS / Scientific Linux CERN (SLC) `on Debian/Ubuntu - `_ - or - `on CentOS/RHEL/SLC/etc. - `_ -* `Debian / Ubuntu - `_ - - -Creating a simple base image using ``scratch`` -.............................................. - -There is a special repository in the Docker registry called ``scratch``, which -was created using an empty tar file:: - - $ tar cv --files-from /dev/null | docker import - scratch - -which you can ``docker pull``. You can then use that image to base your new -minimal containers ``FROM``:: - - FROM scratch - ADD true-asm /true - CMD ["/true"] - -The Dockerfile above is from extremely minimal image - -`tianon/true `_. diff --git a/docs/sources/articles/index.rst b/docs/sources/articles/index.rst deleted file mode 100644 index 75c0cd3fa9..0000000000 --- a/docs/sources/articles/index.rst +++ /dev/null @@ -1,15 +0,0 @@ -:title: Docker articles -:description: various articles related to Docker -:keywords: docker, articles - -.. _articles_list: - -Articles -======== - -.. toctree:: - :maxdepth: 1 - - security - baseimages - runmetrics diff --git a/docs/sources/articles/runmetrics.rst b/docs/sources/articles/runmetrics.rst deleted file mode 100644 index 6b705fb737..0000000000 --- a/docs/sources/articles/runmetrics.rst +++ /dev/null @@ -1,463 +0,0 @@ -:title: Runtime Metrics -:description: Measure the behavior of running containers -:keywords: docker, metrics, CPU, memory, disk, IO, run, runtime - -.. _run_metrics: - - -Runtime Metrics -=============== - -Linux Containers rely on `control groups -`_ which -not only track groups of processes, but also expose metrics about CPU, -memory, and block I/O usage. You can access those metrics and obtain -network usage metrics as well. This is relevant for "pure" LXC -containers, as well as for Docker containers. 
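For example, once you know a container's full ID, its memory statistics can be read
straight from the cgroup pseudo-filesystem. This is only a quick preview of what the
following sections explain in detail; the path assumes a recent distro with cgroups
mounted under ``/sys/fs/cgroup`` and the LXC-style naming described below::

    cat /sys/fs/cgroup/memory/lxc/<full-container-id>/memory.stat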
- -Control Groups --------------- - -Control groups are exposed through a pseudo-filesystem. In recent -distros, you should find this filesystem under -``/sys/fs/cgroup``. Under that directory, you will see multiple -sub-directories, called devices, freezer, blkio, etc.; each -sub-directory actually corresponds to a different cgroup hierarchy. - -On older systems, the control groups might be mounted on ``/cgroup``, -without distinct hierarchies. In that case, instead of seeing the -sub-directories, you will see a bunch of files in that directory, and -possibly some directories corresponding to existing containers. - -To figure out where your control groups are mounted, you can run: - -:: - - grep cgroup /proc/mounts - -.. _run_findpid: - -Enumerating Cgroups -------------------- - -You can look into ``/proc/cgroups`` to see the different control group -subsystems known to the system, the hierarchy they belong to, and how -many groups they contain. - -You can also look at ``/proc//cgroup`` to see which control -groups a process belongs to. The control group will be shown as a path -relative to the root of the hierarchy mountpoint; e.g. ``/`` means -“this process has not been assigned into a particular group”, while -``/lxc/pumpkin`` means that the process is likely to be a member of a -container named ``pumpkin``. - -Finding the Cgroup for a Given Container ----------------------------------------- - -For each container, one cgroup will be created in each hierarchy. On -older systems with older versions of the LXC userland tools, the name -of the cgroup will be the name of the container. With more recent -versions of the LXC tools, the cgroup will be ``lxc/.`` - -For Docker containers using cgroups, the container name will be the -full ID or long ID of the container. If a container shows up as -ae836c95b4c3 in ``docker ps``, its long ID might be something like -``ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79``. You -can look it up with ``docker inspect`` or ``docker ps --no-trunc``. - -Putting everything together to look at the memory metrics for a Docker -container, take a look at ``/sys/fs/cgroup/memory/lxc//``. - -Metrics from Cgroups: Memory, CPU, Block IO -------------------------------------------- - -For each subsystem (memory, CPU, and block I/O), you will find one or -more pseudo-files containing statistics. - -Memory Metrics: ``memory.stat`` -............................... - -Memory metrics are found in the "memory" cgroup. Note that the memory -control group adds a little overhead, because it does very -fine-grained accounting of the memory usage on your host. Therefore, -many distros chose to not enable it by default. Generally, to enable -it, all you have to do is to add some kernel command-line parameters: -``cgroup_enable=memory swapaccount=1``. - -The metrics are in the pseudo-file ``memory.stat``. 
Here is what it -will look like: - -:: - - cache 11492564992 - rss 1930993664 - mapped_file 306728960 - pgpgin 406632648 - pgpgout 403355412 - swap 0 - pgfault 728281223 - pgmajfault 1724 - inactive_anon 46608384 - active_anon 1884520448 - inactive_file 7003344896 - active_file 4489052160 - unevictable 32768 - hierarchical_memory_limit 9223372036854775807 - hierarchical_memsw_limit 9223372036854775807 - total_cache 11492564992 - total_rss 1930993664 - total_mapped_file 306728960 - total_pgpgin 406632648 - total_pgpgout 403355412 - total_swap 0 - total_pgfault 728281223 - total_pgmajfault 1724 - total_inactive_anon 46608384 - total_active_anon 1884520448 - total_inactive_file 7003344896 - total_active_file 4489052160 - total_unevictable 32768 - -The first half (without the ``total_`` prefix) contains statistics -relevant to the processes within the cgroup, excluding -sub-cgroups. The second half (with the ``total_`` prefix) includes -sub-cgroups as well. - -Some metrics are "gauges", i.e. values that can increase or decrease -(e.g. swap, the amount of swap space used by the members of the -cgroup). Some others are "counters", i.e. values that can only go up, -because they represent occurrences of a specific event (e.g. pgfault, -which indicates the number of page faults which happened since the -creation of the cgroup; this number can never decrease). - -cache - the amount of memory used by the processes of this control group - that can be associated precisely with a block on a block - device. When you read from and write to files on disk, this amount - will increase. This will be the case if you use "conventional" I/O - (``open``, ``read``, ``write`` syscalls) as well as mapped files - (with ``mmap``). It also accounts for the memory used by ``tmpfs`` - mounts, though the reasons are unclear. - -rss - the amount of memory that *doesn't* correspond to anything on - disk: stacks, heaps, and anonymous memory maps. - -mapped_file - indicates the amount of memory mapped by the processes in the - control group. It doesn't give you information about *how much* - memory is used; it rather tells you *how* it is used. - -pgfault and pgmajfault - indicate the number of times that a process of the cgroup triggered - a "page fault" and a "major fault", respectively. A page fault - happens when a process accesses a part of its virtual memory space - which is nonexistent or protected. The former can happen if the - process is buggy and tries to access an invalid address (it will - then be sent a ``SIGSEGV`` signal, typically killing it with the - famous ``Segmentation fault`` message). The latter can happen when - the process reads from a memory zone which has been swapped out, or - which corresponds to a mapped file: in that case, the kernel will - load the page from disk, and let the CPU complete the memory - access. It can also happen when the process writes to a - copy-on-write memory zone: likewise, the kernel will preempt the - process, duplicate the memory page, and resume the write operation - on the process' own copy of the page. "Major" faults happen when the - kernel actually has to read the data from disk. When it just has to - duplicate an existing page, or allocate an empty page, it's a - regular (or "minor") fault. - -swap - the amount of swap currently used by the processes in this cgroup. - -active_anon and inactive_anon - the amount of *anonymous* memory that has been identified has - respectively *active* and *inactive* by the kernel. 
"Anonymous" - memory is the memory that is *not* linked to disk pages. In other - words, that's the equivalent of the rss counter described above. In - fact, the very definition of the rss counter is **active_anon** + - **inactive_anon** - **tmpfs** (where tmpfs is the amount of memory - used up by ``tmpfs`` filesystems mounted by this control - group). Now, what's the difference between "active" and "inactive"? - Pages are initially "active"; and at regular intervals, the kernel - sweeps over the memory, and tags some pages as "inactive". Whenever - they are accessed again, they are immediately retagged - "active". When the kernel is almost out of memory, and time comes to - swap out to disk, the kernel will swap "inactive" pages. - -active_file and inactive_file - cache memory, with *active* and *inactive* similar to the *anon* - memory above. The exact formula is cache = **active_file** + - **inactive_file** + **tmpfs**. The exact rules used by the kernel to - move memory pages between active and inactive sets are different - from the ones used for anonymous memory, but the general principle - is the same. Note that when the kernel needs to reclaim memory, it - is cheaper to reclaim a clean (=non modified) page from this pool, - since it can be reclaimed immediately (while anonymous pages and - dirty/modified pages have to be written to disk first). - -unevictable - the amount of memory that cannot be reclaimed; generally, it will - account for memory that has been "locked" with ``mlock``. It is - often used by crypto frameworks to make sure that secret keys and - other sensitive material never gets swapped out to disk. - -memory and memsw limits - These are not really metrics, but a reminder of the limits applied - to this cgroup. The first one indicates the maximum amount of - physical memory that can be used by the processes of this control - group; the second one indicates the maximum amount of RAM+swap. - -Accounting for memory in the page cache is very complex. If two -processes in different control groups both read the same file -(ultimately relying on the same blocks on disk), the corresponding -memory charge will be split between the control groups. It's nice, but -it also means that when a cgroup is terminated, it could increase the -memory usage of another cgroup, because they are not splitting the -cost anymore for those memory pages. - -CPU metrics: ``cpuacct.stat`` -............................. - -Now that we've covered memory metrics, everything else will look very -simple in comparison. CPU metrics will be found in the ``cpuacct`` -controller. - -For each container, you will find a pseudo-file ``cpuacct.stat``, -containing the CPU usage accumulated by the processes of the -container, broken down between ``user`` and ``system`` time. If you're -not familiar with the distinction, ``user`` is the time during which -the processes were in direct control of the CPU (i.e. executing -process code), and ``system`` is the time during which the CPU was -executing system calls on behalf of those processes. - -Those times are expressed in ticks of 1/100th of a second. Actually, -they are expressed in "user jiffies". There are ``USER_HZ`` -*"jiffies"* per second, and on x86 systems, ``USER_HZ`` is 100. This -used to map exactly to the number of scheduler "ticks" per second; but -with the advent of higher frequency scheduling, as well as `tickless -kernels `_, the number of kernel -ticks wasn't relevant anymore. It stuck around anyway, mainly for -legacy and compatibility reasons. 
- -Block I/O metrics -................. - -Block I/O is accounted in the ``blkio`` controller. Different metrics -are scattered across different files. While you can find in-depth -details in the `blkio-controller -`_ -file in the kernel documentation, here is a short list of the most -relevant ones: - -blkio.sectors - contain the number of 512-bytes sectors read and written by the - processes member of the cgroup, device by device. Reads and writes - are merged in a single counter. - -blkio.io_service_bytes - indicates the number of bytes read and written by the cgroup. It has - 4 counters per device, because for each device, it differentiates - between synchronous vs. asynchronous I/O, and reads vs. writes. - -blkio.io_serviced - the number of I/O operations performed, regardless of their size. It - also has 4 counters per device. - -blkio.io_queued - indicates the number of I/O operations currently queued for this - cgroup. In other words, if the cgroup isn't doing any I/O, this will - be zero. Note that the opposite is not true. In other words, if - there is no I/O queued, it does not mean that the cgroup is idle - (I/O-wise). It could be doing purely synchronous reads on an - otherwise quiescent device, which is therefore able to handle them - immediately, without queuing. Also, while it is helpful to figure - out which cgroup is putting stress on the I/O subsystem, keep in - mind that is is a relative quantity. Even if a process group does - not perform more I/O, its queue size can increase just because the - device load increases because of other devices. - -Network Metrics ---------------- - -Network metrics are not exposed directly by control groups. There is a -good explanation for that: network interfaces exist within the context -of *network namespaces*. The kernel could probably accumulate metrics -about packets and bytes sent and received by a group of processes, but -those metrics wouldn't be very useful. You want per-interface metrics -(because traffic happening on the local ``lo`` interface doesn't -really count). But since processes in a single cgroup can belong to -multiple network namespaces, those metrics would be harder to -interpret: multiple network namespaces means multiple ``lo`` -interfaces, potentially multiple ``eth0`` interfaces, etc.; so this is -why there is no easy way to gather network metrics with control -groups. - -Instead we can gather network metrics from other sources: - -IPtables -........ - -IPtables (or rather, the netfilter framework for which iptables is -just an interface) can do some serious accounting. - -For instance, you can setup a rule to account for the outbound HTTP -traffic on a web server: - -:: - - iptables -I OUTPUT -p tcp --sport 80 - - -There is no ``-j`` or ``-g`` flag, so the rule will just count matched -packets and go to the following rule. - -Later, you can check the values of the counters, with: - -:: - - iptables -nxvL OUTPUT - -Technically, ``-n`` is not required, but it will prevent iptables from -doing DNS reverse lookups, which are probably useless in this -scenario. - -Counters include packets and bytes. If you want to setup metrics for -container traffic like this, you could execute a ``for`` loop to add -two ``iptables`` rules per container IP address (one in each -direction), in the ``FORWARD`` chain. This will only meter traffic -going through the NAT layer; you will also have to add traffic going -through the userland proxy. - -Then, you will need to check those counters on a regular basis. 
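For example, here is a minimal sketch of such a loop (the container IP
addresses are placeholders; in practice you would collect them with
``docker inspect``)::

    # Hypothetical list of container IP addresses
    CONTAINER_IPS="172.17.0.2 172.17.0.3"

    for ip in $CONTAINER_IPS; do
        # One counting rule per direction; with no -j target the rule only
        # counts matching packets and evaluation continues with the next rule
        iptables -I FORWARD -s $ip
        iptables -I FORWARD -d $ip
    done

    # Read the counters back (-x shows exact byte counts, -n skips DNS lookups)
    iptables -nxvL FORWARD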
If you happen to use ``collectd``, there is a nice plugin to automate
iptables counters collection.

Interface-level counters
........................

Since each container has a virtual Ethernet interface, you might want
to check the TX and RX counters of this interface directly. You will
notice that each container is associated with a virtual Ethernet
interface in your host, with a name like ``vethKk8Zqi``. Figuring out
which interface corresponds to which container is, unfortunately,
difficult.

For now, the best way is to check the metrics *from within the
containers*. To accomplish this, you can run an executable from the
host environment within the network namespace of a container using
**ip-netns magic**.

The ``ip-netns exec`` command will let you execute any program
(present in the host system) within any network namespace visible to
the current process. This means that your host will be able to enter
the network namespace of your containers, but your containers won't be
able to access the host, nor their sibling containers. Containers will
be able to “see” and affect their sub-containers, though.

The exact format of the command is::

    ip netns exec <container> <command...>

For example::

    ip netns exec mycontainer netstat -i

``ip netns`` finds the "mycontainer" container by using namespaces
pseudo-files. Each process belongs to one network namespace, one PID
namespace, one ``mnt`` namespace, etc., and those namespaces are
materialized under ``/proc/<pid>/ns/``. For example, the network
namespace of PID 42 is materialized by the pseudo-file
``/proc/42/ns/net``.

When you run ``ip netns exec mycontainer ...``, it expects
``/var/run/netns/mycontainer`` to be one of those
pseudo-files. (Symlinks are accepted.)

In other words, to execute a command within the network namespace of a
container, we need to:

* Find out the PID of any process within the container that we want to
  investigate;
* Create a symlink from ``/var/run/netns/<container>`` to
  ``/proc/<pid>/ns/net``;
* Execute ``ip netns exec <container> ....``

Please review :ref:`run_findpid` to learn how to find the cgroup of a
process running in the container whose network usage you want to
measure. From there, you can examine the pseudo-file named ``tasks``,
which contains the PIDs that are in the control group (i.e. in the
container). Pick any one of them.

Putting everything together, if the "short ID" of a container is held
in the environment variable ``$CID``, then you can do this::

    TASKS=/sys/fs/cgroup/devices/$CID*/tasks
    PID=$(head -n 1 $TASKS)
    mkdir -p /var/run/netns
    ln -sf /proc/$PID/ns/net /var/run/netns/$CID
    ip netns exec $CID netstat -i


Tips for high-performance metric collection
-------------------------------------------

Note that running a new process each time you want to update metrics
is (relatively) expensive. If you want to collect metrics at high
resolutions, and/or over a large number of containers (think 1000
containers on a single host), you do not want to fork a new process
each time.

Here is how to collect metrics from a single process. You will have to
write your metric collector in C (or any language that lets you do
low-level system calls). You need to use a special system call,
``setns()``, which lets the current process enter any arbitrary
namespace. It requires, however, an open file descriptor to the
namespace pseudo-file (remember: that’s the pseudo-file in
``/proc/<pid>/ns/net``).
However, there is a catch: you must not keep this file descriptor
open. If you do, when the last process of the control group exits, the
namespace will not be destroyed, and its network resources (like the
virtual interface of the container) will stay around forever (or
until you close that file descriptor).

The right approach would be to keep track of the first PID of each
container, and re-open the namespace pseudo-file each time.

Collecting metrics when a container exits
-----------------------------------------

Sometimes, you do not care about real-time metric collection, but when
a container exits, you want to know how much CPU, memory, etc. it has
used.

Docker makes this difficult because it relies on ``lxc-start``, which
carefully cleans up after itself, but it is still possible. It is
usually easier to collect metrics at regular intervals (e.g. every
minute, with the collectd LXC plugin) and rely on that instead.

But, if you'd still like to gather the stats when a container stops,
here is how:

For each container, start a collection process, and move it to the
control groups that you want to monitor by writing its PID to the
tasks file of the cgroup. The collection process should periodically
re-read the tasks file to check if it's the last process of the
control group. (If you also want to collect network statistics as
explained in the previous section, you should also move the process to
the appropriate network namespace.)

When the container exits, ``lxc-start`` will try to delete the control
groups. It will fail, since the control group is still in use; but
that’s fine. Your process should now detect that it is the only one
remaining in the group. Now is the right time to collect all the
metrics you need!

Finally, your process should move itself back to the root control
group, and remove the container control group. To remove a control
group, just ``rmdir`` its directory. It's counter-intuitive to
``rmdir`` a directory while it still contains files; but remember that
this is a pseudo-filesystem, so the usual rules don't apply. After the
cleanup is done, the collection process can exit safely.

diff --git a/docs/sources/articles/security.rst b/docs/sources/articles/security.rst
deleted file mode 100644
index ec2ab9bffd..0000000000
--- a/docs/sources/articles/security.rst
+++ /dev/null
@@ -1,269 +0,0 @@
:title: Docker Security
:description: Review of the Docker Daemon attack surface
:keywords: Docker, Docker documentation, security

.. _dockersecurity:

Docker Security
===============

   *Adapted from* `Containers & Docker: How Secure are They? `_

There are three major areas to consider when reviewing Docker security:

* the intrinsic security of containers, as implemented by kernel
  namespaces and cgroups;
* the attack surface of the Docker daemon itself;
* the "hardening" security features of the kernel and how they
  interact with containers.

Kernel Namespaces
-----------------

Docker containers are essentially LXC containers, and they come with
the same security features. When you start a container with ``docker
run``, behind the scenes Docker uses ``lxc-start`` to execute the
Docker container. This creates a set of namespaces and control groups
for the container. Those namespaces and control groups are not created
by Docker itself, but by ``lxc-start``.
This means that as the LXC -userland tools evolve (and provide additional namespaces and isolation -features), Docker will automatically make use of them. - -**Namespaces provide the first and most straightforward form of -isolation**: processes running within a container cannot see, and even -less affect, processes running in another container, or in the host -system. - -**Each container also gets its own network stack**, meaning that a -container doesn’t get a privileged access to the sockets or interfaces -of another container. Of course, if the host system is setup -accordingly, containers can interact with each other through their -respective network interfaces — just like they can interact with -external hosts. When you specify public ports for your containers or -use :ref:`links ` then IP traffic is allowed -between containers. They can ping each other, send/receive UDP -packets, and establish TCP connections, but that can be restricted if -necessary. From a network architecture point of view, all containers -on a given Docker host are sitting on bridge interfaces. This means -that they are just like physical machines connected through a common -Ethernet switch; no more, no less. - -How mature is the code providing kernel namespaces and private -networking? Kernel namespaces were introduced `between kernel version -2.6.15 and 2.6.26 -`_. This -means that since July 2008 (date of the 2.6.26 release, now 5 years -ago), namespace code has been exercised and scrutinized on a large -number of production systems. And there is more: the design and -inspiration for the namespaces code are even older. Namespaces are -actually an effort to reimplement the features of `OpenVZ -`_ in such a way that they could -be merged within the mainstream kernel. And OpenVZ was initially -released in 2005, so both the design and the implementation are -pretty mature. - -Control Groups --------------- - -Control Groups are the other key component of Linux Containers. They -implement resource accounting and limiting. They provide a lot of very -useful metrics, but they also help to ensure that each container gets -its fair share of memory, CPU, disk I/O; and, more importantly, that a -single container cannot bring the system down by exhausting one of -those resources. - -So while they do not play a role in preventing one container from -accessing or affecting the data and processes of another container, -they are essential to fend off some denial-of-service attacks. They -are particularly important on multi-tenant platforms, like public and -private PaaS, to guarantee a consistent uptime (and performance) even -when some applications start to misbehave. - -Control Groups have been around for a while as well: the code was -started in 2006, and initially merged in kernel 2.6.24. - -.. _dockersecurity_daemon: - -Docker Daemon Attack Surface ----------------------------- - -Running containers (and applications) with Docker implies running the -Docker daemon. This daemon currently requires root privileges, and you -should therefore be aware of some important details. - -First of all, **only trusted users should be allowed to control your -Docker daemon**. This is a direct consequence of some powerful Docker -features. Specifically, Docker allows you to share a directory between -the Docker host and a guest container; and it allows you to do so -without limiting the access rights of the container. 
This means that -you can start a container where the ``/host`` directory will be the -``/`` directory on your host; and the container will be able to alter -your host filesystem without any restriction. This sounds crazy? Well, -you have to know that **all virtualization systems allowing filesystem -resource sharing behave the same way**. Nothing prevents you from -sharing your root filesystem (or even your root block device) with a -virtual machine. - -This has a strong security implication: if you instrument Docker from -e.g. a web server to provision containers through an API, you should -be even more careful than usual with parameter checking, to make sure -that a malicious user cannot pass crafted parameters causing Docker to -create arbitrary containers. - -For this reason, the REST API endpoint (used by the Docker CLI to -communicate with the Docker daemon) changed in Docker 0.5.2, and now -uses a UNIX socket instead of a TCP socket bound on 127.0.0.1 (the -latter being prone to cross-site-scripting attacks if you happen to -run Docker directly on your local machine, outside of a VM). You can -then use traditional UNIX permission checks to limit access to the -control socket. - -You can also expose the REST API over HTTP if you explicitly decide -so. However, if you do that, being aware of the abovementioned -security implication, you should ensure that it will be reachable -only from a trusted network or VPN; or protected with e.g. ``stunnel`` -and client SSL certificates. - -Recent improvements in Linux namespaces will soon allow to run -full-featured containers without root privileges, thanks to the new -user namespace. This is covered in detail `here -`_. Moreover, -this will solve the problem caused by sharing filesystems between host -and guest, since the user namespace allows users within containers -(including the root user) to be mapped to other users in the host -system. - -The end goal for Docker is therefore to implement two additional -security improvements: - -* map the root user of a container to a non-root user of the Docker - host, to mitigate the effects of a container-to-host privilege - escalation; -* allow the Docker daemon to run without root privileges, and delegate - operations requiring those privileges to well-audited sub-processes, - each with its own (very limited) scope: virtual network setup, - filesystem management, etc. - -Finally, if you run Docker on a server, it is recommended to run -exclusively Docker in the server, and move all other services within -containers controlled by Docker. Of course, it is fine to keep your -favorite admin tools (probably at least an SSH server), as well as -existing monitoring/supervision processes (e.g. NRPE, collectd, etc). - -Linux Kernel Capabilities -------------------------- - -By default, Docker starts containers with a very restricted set of -capabilities. What does that mean? - -Capabilities turn the binary "root/non-root" dichotomy into a -fine-grained access control system. Processes (like web servers) that -just need to bind on a port below 1024 do not have to run as root: -they can just be granted the ``net_bind_service`` capability -instead. And there are many other capabilities, for almost all the -specific areas where root privileges are usually needed. - -This means a lot for container security; let’s see why! - -Your average server (bare metal or virtual machine) needs to run a -bunch of processes as root. Those typically include SSH, cron, -syslogd; hardware management tools (to e.g. 
load modules), network -configuration tools (to handle e.g. DHCP, WPA, or VPNs), and much -more. A container is very different, because almost all of those tasks -are handled by the infrastructure around the container: - -* SSH access will typically be managed by a single server running in - the Docker host; -* ``cron``, when necessary, should run as a user process, dedicated - and tailored for the app that needs its scheduling service, rather - than as a platform-wide facility; -* log management will also typically be handed to Docker, or by - third-party services like Loggly or Splunk; -* hardware management is irrelevant, meaning that you never need to - run ``udevd`` or equivalent daemons within containers; -* network management happens outside of the containers, enforcing - separation of concerns as much as possible, meaning that a container - should never need to perform ``ifconfig``, ``route``, or ip commands - (except when a container is specifically engineered to behave like a - router or firewall, of course). - -This means that in most cases, containers will not need "real" root -privileges *at all*. And therefore, containers can run with a reduced -capability set; meaning that "root" within a container has much less -privileges than the real "root". For instance, it is possible to: - -* deny all "mount" operations; -* deny access to raw sockets (to prevent packet spoofing); -* deny access to some filesystem operations, like creating new device - nodes, changing the owner of files, or altering attributes - (including the immutable flag); -* deny module loading; -* and many others. - -This means that even if an intruder manages to escalate to root within -a container, it will be much harder to do serious damage, or to -escalate to the host. - -This won't affect regular web apps; but malicious users will find that -the arsenal at their disposal has shrunk considerably! You can see -`the list of dropped capabilities in the Docker code -`_, -and a full list of available capabilities in `Linux manpages -`_. - -Of course, you can always enable extra capabilities if you really need -them (for instance, if you want to use a FUSE-based filesystem), but -by default, Docker containers will be locked down to ensure maximum -safety. - -Other Kernel Security Features ------------------------------- - -Capabilities are just one of the many security features provided by -modern Linux kernels. It is also possible to leverage existing, -well-known systems like TOMOYO, AppArmor, SELinux, GRSEC, etc. with -Docker. - -While Docker currently only enables capabilities, it doesn't interfere -with the other systems. This means that there are many different ways -to harden a Docker host. Here are a few examples. - -* You can run a kernel with GRSEC and PAX. This will add many safety - checks, both at compile-time and run-time; it will also defeat many - exploits, thanks to techniques like address randomization. It - doesn’t require Docker-specific configuration, since those security - features apply system-wide, independently of containers. -* If your distribution comes with security model templates for LXC - containers, you can use them out of the box. For instance, Ubuntu - comes with AppArmor templates for LXC, and those templates provide - an extra safety net (even though it overlaps greatly with - capabilities). -* You can define your own policies using your favorite access control - mechanism. Since Docker containers are standard LXC containers, - there is nothing “magic” or specific to Docker. 
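For instance, before relying on the Ubuntu AppArmor templates mentioned
above, you can check which profiles are actually loaded and enforced on the
host (a sketch; ``aa-status`` is part of the AppArmor userland tools, and
the template location can vary between releases):

.. code-block:: bash

    # List the AppArmor profiles currently loaded and their mode
    sudo aa-status

    # On a stock Ubuntu install with the LXC tools, the container
    # profiles typically live here
    ls /etc/apparmor.d/lxc/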
- -Just like there are many third-party tools to augment Docker -containers with e.g. special network topologies or shared filesystems, -you can expect to see tools to harden existing Docker containers -without affecting Docker’s core. - -Conclusions ------------ - -Docker containers are, by default, quite secure; especially if you -take care of running your processes inside the containers as -non-privileged users (i.e. non root). - -You can add an extra layer of safety by enabling Apparmor, SELinux, -GRSEC, or your favorite hardening solution. - -Last but not least, if you see interesting security features in other -containerization systems, you will be able to implement them as well -with Docker, since everything is provided by the kernel anyway. - -For more context and especially for comparisons with VMs and other -container systems, please also see the `original blog post -`_. - -.. _blogsecurity: http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/ - diff --git a/docs/sources/conf.py b/docs/sources/conf.py deleted file mode 100644 index 12f5b57841..0000000000 --- a/docs/sources/conf.py +++ /dev/null @@ -1,266 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Docker documentation build configuration file, created by -# sphinx-quickstart on Tue Mar 19 12:34:07 2013. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys, os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ----------------------------------------------------- - - - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# the 'redirect_home.html' page redirects using a http meta refresh which, according -# to official sources is more or less equivalent of a 301. - -html_additional_pages = { - 'concepts/containers': 'redirect_home.html', - 'concepts/introduction': 'redirect_home.html', - 'builder/basics': 'redirect_build.html', - } - - - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks'] - -# Configure extlinks -extlinks = { 'issue': ('https://github.com/dotcloud/docker/issues/%s', - 'Issue ') } - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -html_add_permalinks = u'¶' - -# The master toctree document. -master_doc = 'toctree' - -# General information about the project. -project = u'Docker' -copyright = u'2014 Docker, Inc.' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '0.1' -# The full version, including alpha/beta/rc tags. 
-release = '0' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'docker' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] -html_theme_path = ['../theme'] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. - -# We use a png favicon. This is not compatible with internet explorer, but looks -# much better on all other browsers. However, sphynx doesn't like it (it likes -# .ico better) so we have just put it in the template rather than used this setting -# html_favicon = 'favicon.png' - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['static_files'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. 
-html_show_sourcelink = False - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'Dockerdoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('toctree', 'Docker.tex', u'Docker Documentation', - u'Team Docker', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output -------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('reference/commandline/cli', 'docker', u'Docker CLI Documentation', - [u'Team Docker'], 1), - ('reference/builder', 'Dockerfile', u'Dockerfile Documentation', - [u'Team Docker'], 5), -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------------ - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('toctree', 'Docker', u'Docker Documentation', - u'Team Docker', 'Docker', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' diff --git a/docs/sources/contributing/contributing.rst b/docs/sources/contributing/contributing.rst deleted file mode 100644 index 3b3b3f8f88..0000000000 --- a/docs/sources/contributing/contributing.rst +++ /dev/null @@ -1,25 +0,0 @@ -:title: Contribution Guidelines -:description: Contribution guidelines: create issues, conventions, pull requests -:keywords: contributing, docker, documentation, help, guideline - -Contributing to Docker -====================== - -Want to hack on Docker? Awesome! - -The repository includes `all the instructions you need to get -started `_. 
The `developer environment Dockerfile
`_
specifies the tools and versions used to test and build Docker.

If you're making changes to the documentation, see the
`README.md `_.

The `documentation environment Dockerfile
`_
specifies the tools and versions used to build the Documentation.

Further interesting details can be found in the `Packaging hints
`_.

diff --git a/docs/sources/contributing/devenvironment.rst b/docs/sources/contributing/devenvironment.rst
deleted file mode 100644
index fbd47cbed7..0000000000
--- a/docs/sources/contributing/devenvironment.rst
+++ /dev/null
@@ -1,167 +0,0 @@
:title: Setting Up a Dev Environment
:description: Guides on how to contribute to docker
:keywords: Docker, documentation, developers, contributing, dev environment

Setting Up a Dev Environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

To make it easier to contribute to Docker, we provide a standard
development environment. It is important that the same environment be
used for all tests, builds and releases. The standard development
environment defines all build dependencies: system libraries and
binaries, go environment, go dependencies, etc.


Step 1: Install Docker
----------------------

Docker's build environment itself is a Docker container, so the first
step is to install Docker on your system.

You can follow the `install instructions most relevant to your system
`_. Make sure you have
a working, up-to-date docker installation, then continue to the next
step.


Step 2: Install tools used for this tutorial
--------------------------------------------

Install ``git``; honest, it's very good. You can use other ways to get the Docker
source, but they're not anywhere near as easy.

Install ``make``. This tutorial uses our base Makefile to kick off the docker
containers in a repeatable and consistent way. Again, you can do it in other ways
but you need to do more work.

Step 3: Check out the Source
----------------------------

.. code-block:: bash

    git clone http://git@github.com/dotcloud/docker
    cd docker

To check out a different revision, just use ``git checkout`` with the name of the branch or the revision number.


Step 4: Build the Environment
-----------------------------

The following command builds a development environment using the Dockerfile in the current directory. Essentially, it will install all the build and runtime dependencies necessary to build and test Docker. This command will take some time to complete when you first execute it.

.. code-block:: bash

    sudo make build

If the build is successful, congratulations! You have produced a clean build of
docker, neatly encapsulated in a standard build environment.


Step 5: Build the Docker Binary
-------------------------------

To create the Docker binary, run this command:

.. code-block:: bash

    sudo make binary

This will create the Docker binary in ``./bundles/-dev/binary/``

Using your built Docker binary
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The binary is available outside the container in the directory
``./bundles/-dev/binary/``. You can swap your host docker executable
with this binary for live testing - for example, on Ubuntu:

.. code-block:: bash

    sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/-dev/binary/docker--dev $(which docker);sudo service docker start

.. note:: It's safer to run the tests below before swapping your host's docker binary.
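If you do swap in the freshly built binary, you can confirm that it is the one
actually being used (a sketch; the exact version string depends on your checkout):

.. code-block:: bash

    # Both the client and the daemon should report the -dev version you just built
    sudo docker version

    # The build also leaves the binary in the bundles directory for reference
    ls ./bundles/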
Step 6: Run the Tests
---------------------

To execute the test cases, run this command:

.. code-block:: bash

    sudo make test

If the tests are successful, the tail of the output should look something like this:

.. code-block:: bash

    --- PASS: TestWriteBroadcaster (0.00 seconds)
    === RUN TestRaceWriteBroadcaster
    --- PASS: TestRaceWriteBroadcaster (0.00 seconds)
    === RUN TestTruncIndex
    --- PASS: TestTruncIndex (0.00 seconds)
    === RUN TestCompareKernelVersion
    --- PASS: TestCompareKernelVersion (0.00 seconds)
    === RUN TestHumanSize
    --- PASS: TestHumanSize (0.00 seconds)
    === RUN TestParseHost
    --- PASS: TestParseHost (0.00 seconds)
    === RUN TestParseRepositoryTag
    --- PASS: TestParseRepositoryTag (0.00 seconds)
    === RUN TestGetResolvConf
    --- PASS: TestGetResolvConf (0.00 seconds)
    === RUN TestCheckLocalDns
    --- PASS: TestCheckLocalDns (0.00 seconds)
    === RUN TestParseRelease
    --- PASS: TestParseRelease (0.00 seconds)
    === RUN TestDependencyGraphCircular
    --- PASS: TestDependencyGraphCircular (0.00 seconds)
    === RUN TestDependencyGraph
    --- PASS: TestDependencyGraph (0.00 seconds)
    PASS
    ok      github.com/dotcloud/docker/utils        0.017s

If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
You can use this to select certain tests to run, e.g.::

    TESTFLAGS='-run ^TestBuild$' make test

If the output indicates "FAIL" and you see errors like this:

.. code-block:: text

    server.go:1302 Error: Insertion failed because database is full: database or disk is full

    utils_test.go:179: Error copy: exit status 1 (cp: writing '/tmp/docker-testd5c9-[...]': No space left on device

Then you likely don't have enough memory available to run the test suite. 2GB is recommended.

Step 7: Use Docker
------------------

You can run an interactive session in the newly built container:

.. code-block:: bash

    sudo make shell

    # type 'exit' or Ctrl-D to exit


Extra Step: Build and view the Documentation
--------------------------------------------

If you want to read the documentation from a local website, or are making changes
to it, you can build the documentation and then serve it with:

.. code-block:: bash

    sudo make docs
    # when it's done, you can point your browser to http://yourdockerhost:8000
    # type Ctrl-C to exit


**Need More Help?**

If you need more help, then hop on to the `#docker-dev IRC channel `_ or post a message on the `Docker developer mailing list `_.

diff --git a/docs/sources/contributing/index.rst b/docs/sources/contributing/index.rst
deleted file mode 100644
index 3669807a14..0000000000
--- a/docs/sources/contributing/index.rst
+++ /dev/null
@@ -1,14 +0,0 @@
:title: Contributing to Docker
:description: Guides on how to contribute to docker
:keywords: Docker, documentation, developers, contributing, dev environment



Contributing
============

.. toctree::
   :maxdepth: 1

   contributing
   devenvironment

diff --git a/docs/sources/examples/apt-cacher-ng.rst b/docs/sources/examples/apt-cacher-ng.rst
deleted file mode 100644
index dd844d4ef1..0000000000
--- a/docs/sources/examples/apt-cacher-ng.rst
+++ /dev/null
@@ -1,102 +0,0 @@
:title: Running an apt-cacher-ng service
:description: Installing and running an apt-cacher-ng service
:keywords: docker, example, package installation, networking, debian, ubuntu

.. _running_apt-cacher-ng_service:

Apt-Cacher-ng Service
=====================

..
include:: example_header.inc - - -When you have multiple Docker servers, or build unrelated Docker containers -which can't make use of the Docker build cache, it can be useful to have a -caching proxy for your packages. This container makes the second download of -any package almost instant. - -Use the following Dockerfile: - -.. literalinclude:: apt-cacher-ng.Dockerfile - -To build the image using: - -.. code-block:: bash - - $ sudo docker build -t eg_apt_cacher_ng . - -Then run it, mapping the exposed port to one on the host - -.. code-block:: bash - - $ sudo docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng - -To see the logfiles that are 'tailed' in the default command, you can use: - -.. code-block:: bash - - $ sudo docker logs -f test_apt_cacher_ng - -To get your Debian-based containers to use the proxy, you can do one of three things - -1. Add an apt Proxy setting ``echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/conf.d/01proxy`` -2. Set an environment variable: ``http_proxy=http://dockerhost:3142/`` -3. Change your ``sources.list`` entries to start with ``http://dockerhost:3142/`` - -**Option 1** injects the settings safely into your apt configuration in a local -version of a common base: - -.. code-block:: bash - - FROM ubuntu - RUN echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy - RUN apt-get update ; apt-get install vim git - - # docker build -t my_ubuntu . - -**Option 2** is good for testing, but will -break other HTTP clients which obey ``http_proxy``, such as ``curl``, ``wget`` and others: - -.. code-block:: bash - - $ sudo docker run --rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash - -**Option 3** is the least portable, but there will be times when you might need to -do it and you can do it from your ``Dockerfile`` too. - -Apt-cacher-ng has some tools that allow you to manage the repository, and they -can be used by leveraging the ``VOLUME`` instruction, and the image we built to run the -service: - -.. code-block:: bash - - $ sudo docker run --rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash - - $$ /usr/lib/apt-cacher-ng/distkill.pl - Scanning /var/cache/apt-cacher-ng, please wait... - Found distributions: - bla, taggedcount: 0 - 1. precise-security (36 index files) - 2. wheezy (25 index files) - 3. precise-updates (36 index files) - 4. precise (36 index files) - 5. wheezy-updates (18 index files) - - Found architectures: - 6. amd64 (36 index files) - 7. i386 (24 index files) - - WARNING: The removal action may wipe out whole directories containing - index files. Select d to see detailed list. - - (Number nn: tag distribution or architecture nn; 0: exit; d: show details; r: remove tagged; q: quit): q - - -Finally, clean up after your test by stopping and removing the container, and -then removing the image. - -.. 
code-block:: bash - - $ sudo docker stop test_apt_cacher_ng - $ sudo docker rm test_apt_cacher_ng - $ sudo docker rmi eg_apt_cacher_ng diff --git a/docs/sources/examples/cfengine_process_management.rst b/docs/sources/examples/cfengine_process_management.rst deleted file mode 100644 index 7ca2c35498..0000000000 --- a/docs/sources/examples/cfengine_process_management.rst +++ /dev/null @@ -1,137 +0,0 @@ -:title: Process Management with CFEngine -:description: Managing containerized processes with CFEngine -:keywords: cfengine, process, management, usage, docker, documentation - -Process Management with CFEngine -================================ - -Create Docker containers with managed processes. - -Docker monitors one process in each running container and the container lives or dies with that process. -By introducing CFEngine inside Docker containers, we can alleviate a few of the issues that may arise: - -* It is possible to easily start multiple processes within a container, all of which will be managed automatically, with the normal ``docker run`` command. -* If a managed process dies or crashes, CFEngine will start it again within 1 minute. -* The container itself will live as long as the CFEngine scheduling daemon (cf-execd) lives. With CFEngine, we are able to decouple the life of the container from the uptime of the service it provides. - - -How it works ------------- - -CFEngine, together with the cfe-docker integration policies, are installed as part of the Dockerfile. This builds CFEngine into our Docker image. - -The Dockerfile's ``ENTRYPOINT`` takes an arbitrary amount of commands (with any desired arguments) as parameters. -When we run the Docker container these parameters get written to CFEngine policies and CFEngine takes over to ensure that the desired processes are running in the container. - -CFEngine scans the process table for the ``basename`` of the commands given to the ``ENTRYPOINT`` and runs the command to start the process if the ``basename`` is not found. -For example, if we start the container with ``docker run "/path/to/my/application parameters"``, CFEngine will look for a process named ``application`` and run the command. -If an entry for ``application`` is not found in the process table at any point in time, CFEngine will execute ``/path/to/my/application parameters`` to start the application once again. -The check on the process table happens every minute. - -Note that it is therefore important that the command to start your application leaves a process with the basename of the command. -This can be made more flexible by making some minor adjustments to the CFEngine policies, if desired. - - -Usage ------ - -This example assumes you have Docker installed and working. -We will install and manage ``apache2`` and ``sshd`` in a single container. - -There are three steps: - -1. Install CFEngine into the container. -2. Copy the CFEngine Docker process management policy into the containerized CFEngine installation. -3. Start your application processes as part of the ``docker run`` command. - - -Building the container image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The first two steps can be done as part of a Dockerfile, as follows. - -.. 
code-block:: bash - - FROM ubuntu - MAINTAINER Eystein Måløy Stenberg - - RUN apt-get -y install wget lsb-release unzip ca-certificates - - # install latest CFEngine - RUN wget -qO- http://cfengine.com/pub/gpg.key | apt-key add - - RUN echo "deb http://cfengine.com/pub/apt $(lsb_release -cs) main" > /etc/apt/sources.list.d/cfengine-community.list - RUN apt-get update - RUN apt-get install cfengine-community - - # install cfe-docker process management policy - RUN wget https://github.com/estenberg/cfe-docker/archive/master.zip -P /tmp/ && unzip /tmp/master.zip -d /tmp/ - RUN cp /tmp/cfe-docker-master/cfengine/bin/* /var/cfengine/bin/ - RUN cp /tmp/cfe-docker-master/cfengine/inputs/* /var/cfengine/inputs/ - RUN rm -rf /tmp/cfe-docker-master /tmp/master.zip - - # apache2 and openssh are just for testing purposes, install your own apps here - RUN apt-get -y install openssh-server apache2 - RUN mkdir -p /var/run/sshd - RUN echo "root:password" | chpasswd # need a password for ssh - - ENTRYPOINT ["/var/cfengine/bin/docker_processes_run.sh"] - - -By saving this file as ``Dockerfile`` to a working directory, you can then build your container with the docker build command, -e.g. ``docker build -t managed_image``. - -Testing the container -~~~~~~~~~~~~~~~~~~~~~ - -Start the container with ``apache2`` and ``sshd`` running and managed, forwarding a port to our SSH instance: - -.. code-block:: bash - - docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start" - -We now clearly see one of the benefits of the cfe-docker integration: it allows to start several processes -as part of a normal ``docker run`` command. - -We can now log in to our new container and see that both ``apache2`` and ``sshd`` are running. We have set the root password to -"password" in the Dockerfile above and can use that to log in with ssh: - -.. code-block:: bash - - ssh -p222 root@127.0.0.1 - - ps -ef - UID PID PPID C STIME TTY TIME CMD - root 1 0 0 07:48 ? 00:00:00 /bin/bash /var/cfengine/bin/docker_processes_run.sh /usr/sbin/sshd /etc/init.d/apache2 start - root 18 1 0 07:48 ? 00:00:00 /var/cfengine/bin/cf-execd -F - root 20 1 0 07:48 ? 00:00:00 /usr/sbin/sshd - root 32 1 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start - www-data 34 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start - www-data 35 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start - www-data 36 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start - root 93 20 0 07:48 ? 00:00:00 sshd: root@pts/0 - root 105 93 0 07:48 pts/0 00:00:00 -bash - root 112 105 0 07:49 pts/0 00:00:00 ps -ef - - -If we stop apache2, it will be started again within a minute by CFEngine. - -.. code-block:: bash - - service apache2 status - Apache2 is running (pid 32). - service apache2 stop - * Stopping web server apache2 ... waiting [ OK ] - service apache2 status - Apache2 is NOT running. - # ... wait up to 1 minute... - service apache2 status - Apache2 is running (pid 173). - - -Adapting to your applications ------------------------------ - -To make sure your applications get managed in the same manner, there are just two things you need to adjust from the above example: - -* In the Dockerfile used above, install your applications instead of ``apache2`` and ``sshd``. -* When you start the container with ``docker run``, specify the command line arguments to your applications rather than ``apache2`` and ``sshd``. 
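For instance, a hypothetical image that manages ``nginx`` and a custom worker
script instead could be built and started like this (the image name and paths
are assumptions, not part of the cfe-docker project):

.. code-block:: bash

    # Build the adapted image from your own Dockerfile
    docker build -t my_managed_app .

    # Hand each start command to the CFEngine entry point; CFEngine will
    # restart either process if it disappears from the process table
    docker run -d my_managed_app "/usr/sbin/nginx" "/usr/local/bin/worker.sh"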
diff --git a/docs/sources/examples/couchdb_data_volumes.rst b/docs/sources/examples/couchdb_data_volumes.rst deleted file mode 100644 index 6cf3fab68c..0000000000 --- a/docs/sources/examples/couchdb_data_volumes.rst +++ /dev/null @@ -1,56 +0,0 @@ -:title: Sharing data between 2 couchdb databases -:description: Sharing data between 2 couchdb databases -:keywords: docker, example, package installation, networking, couchdb, data volumes - -.. _running_couchdb_service: - -CouchDB Service -=============== - -.. include:: example_header.inc - -Here's an example of using data volumes to share the same data between -two CouchDB containers. This could be used for hot upgrades, testing -different versions of CouchDB on the same data, etc. - -Create first database ---------------------- - -Note that we're marking ``/var/lib/couchdb`` as a data volume. - -.. code-block:: bash - - COUCH1=$(sudo docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03) - -Add data to the first database ------------------------------- - -We're assuming your Docker host is reachable at ``localhost``. If not, -replace ``localhost`` with the public IP of your Docker host. - -.. code-block:: bash - - HOST=localhost - URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -Po '\d+$')/_utils/" - echo "Navigate to $URL in your browser, and use the couch interface to add data" - -Create second database ----------------------- - -This time, we're requesting shared access to ``$COUCH1``'s volumes. - -.. code-block:: bash - - COUCH2=$(sudo docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03) - -Browse data on the second database ----------------------------------- - -.. code-block:: bash - - HOST=localhost - URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -Po '\d+$')/_utils/" - echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!' - -Congratulations, you are now running two Couchdb containers, completely -isolated from each other *except* for their data. diff --git a/docs/sources/examples/hello_world.rst b/docs/sources/examples/hello_world.rst deleted file mode 100644 index 39d7abea2c..0000000000 --- a/docs/sources/examples/hello_world.rst +++ /dev/null @@ -1,181 +0,0 @@ -:title: Hello world example -:description: A simple hello world example with Docker -:keywords: docker, example, hello world - -.. _running_examples: - -Check your Docker install -------------------------- - -This guide assumes you have a working installation of Docker. To check -your Docker install, run the following command: - -.. code-block:: bash - - # Check that you have a working install - $ sudo docker info - -If you get ``docker: command not found`` or something like -``/var/lib/docker/repositories: permission denied`` you may have an incomplete -Docker installation or insufficient privileges to access docker on your machine. - -Please refer to :ref:`installation_list` for installation instructions. - - -.. _hello_world: - -Hello World ------------ - -.. include:: example_header.inc - -This is the most basic example available for using Docker. - -Download the small base image named ``busybox``: - -.. code-block:: bash - - # Download a busybox image - $ sudo docker pull busybox - -The ``busybox`` image is a minimal Linux system. You can do the same -with any number of other images, such as ``debian``, ``ubuntu`` or ``centos``. -The images can be found and retrieved using the `Docker index`_. - -.. _Docker index: http://index.docker.io - -.. 
code-block:: bash - - $ sudo docker run busybox /bin/echo hello world - -This command will run a simple ``echo`` command, that will echo ``hello world`` back to the console over standard out. - -**Explanation:** - -- **"sudo"** execute the following commands as user *root* -- **"docker run"** run a command in a new container -- **"busybox"** is the image we are running the command in. -- **"/bin/echo"** is the command we want to run in the container -- **"hello world"** is the input for the echo command - - - -**Video:** - -See the example in action - -.. raw:: html - - - ----- - -.. _hello_world_daemon: - -Hello World Daemon ------------------- - -.. include:: example_header.inc - -And now for the most boring daemon ever written! - -We will use the Ubuntu image to run a simple hello world daemon that will just print hello -world to standard out every second. It will continue to do this until -we stop it. - -**Steps:** - -.. code-block:: bash - - container_id=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done") - -We are going to run a simple hello world daemon in a new container -made from the ``ubuntu`` image. - -- **"sudo docker run -d "** run a command in a new container. We pass "-d" - so it runs as a daemon. -- **"ubuntu"** is the image we want to run the command inside of. -- **"/bin/sh -c"** is the command we want to run in the container -- **"while true; do echo hello world; sleep 1; done"** is the mini - script we want to run, that will just print hello world once a - second until we stop it. -- **$container_id** the output of the run command will return a - container id, we can use in future commands to see what is going on - with this process. - -.. code-block:: bash - - sudo docker logs $container_id - -Check the logs make sure it is working correctly. - -- **"docker logs**" This will return the logs for a container -- **$container_id** The Id of the container we want the logs for. - -.. code-block:: bash - - sudo docker attach --sig-proxy=false $container_id - -Attach to the container to see the results in real-time. - -- **"docker attach**" This will allow us to attach to a background - process to see what is going on. -- **"--sig-proxy=false"** Do not forward signals to the container; allows - us to exit the attachment using Control-C without stopping the container. -- **$container_id** The Id of the container we want to attach to. - -Exit from the container attachment by pressing Control-C. - -.. code-block:: bash - - sudo docker ps - -Check the process list to make sure it is running. - -- **"docker ps"** this shows all running process managed by docker - -.. code-block:: bash - - sudo docker stop $container_id - -Stop the container, since we don't need it anymore. - -- **"docker stop"** This stops a container -- **$container_id** The Id of the container we want to stop. - -.. code-block:: bash - - sudo docker ps - -Make sure it is really stopped. - - -**Video:** - -See the example in action - -.. 
raw:: html - - - -The next example in the series is a :ref:`nodejs_web_app` example, or -you could skip to any of the other examples: - - -* :ref:`nodejs_web_app` -* :ref:`running_redis_service` -* :ref:`running_ssh_service` -* :ref:`running_couchdb_service` -* :ref:`postgresql_service` -* :ref:`mongodb_image` -* :ref:`python_web_app` diff --git a/docs/sources/examples/https.rst b/docs/sources/examples/https.rst deleted file mode 100644 index 7a221ed951..0000000000 --- a/docs/sources/examples/https.rst +++ /dev/null @@ -1,126 +0,0 @@ -:title: Docker HTTPS Setup -:description: How to setup docker with https -:keywords: docker, example, https, daemon - -.. _running_docker_https: - -Running Docker with https -========================= - -By default, Docker runs via a non-networked Unix socket. It can also optionally -communicate using a HTTP socket. - -If you need Docker reachable via the network in a safe manner, you can enable -TLS by specifying the `tlsverify` flag and pointing Docker's `tlscacert` flag to a -trusted CA certificate. - -In daemon mode, it will only allow connections from clients authenticated by a -certificate signed by that CA. In client mode, it will only connect to servers -with a certificate signed by that CA. - -.. warning:: - - Using TLS and managing a CA is an advanced topic. Please make you self familiar - with openssl, x509 and tls before using it in production. - -Create a CA, server and client keys with OpenSSL ------------------------------------------------- - -First, initialize the CA serial file and generate CA private and public keys: - -.. code-block:: bash - - $ echo 01 > ca.srl - $ openssl genrsa -des3 -out ca-key.pem - $ openssl req -new -x509 -days 365 -key ca-key.pem -out ca.pem - -Now that we have a CA, you can create a server key and certificate signing request. -Make sure that `"Common Name (e.g. server FQDN or YOUR name)"` matches the hostname you will use -to connect to Docker or just use '*' for a certificate valid for any hostname: - -.. code-block:: bash - - $ openssl genrsa -des3 -out server-key.pem - $ openssl req -new -key server-key.pem -out server.csr - -Next we're going to sign the key with our CA: - -.. code-block:: bash - - $ openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem \ - -out server-cert.pem - -For client authentication, create a client key and certificate signing request: - -.. code-block:: bash - - $ openssl genrsa -des3 -out client-key.pem - $ openssl req -new -key client-key.pem -out client.csr - - -To make the key suitable for client authentication, create a extensions config file: - -.. code-block:: bash - - $ echo extendedKeyUsage = clientAuth > extfile.cnf - -Now sign the key: - -.. code-block:: bash - - $ openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem \ - -out client-cert.pem -extfile extfile.cnf - -Finally you need to remove the passphrase from the client and server key: - -.. code-block:: bash - - $ openssl rsa -in server-key.pem -out server-key.pem - $ openssl rsa -in client-key.pem -out client-key.pem - -Now you can make the Docker daemon only accept connections from clients providing -a certificate trusted by our CA: - -.. code-block:: bash - - $ sudo docker -d --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \ - -H=0.0.0.0:4243 - -To be able to connect to Docker and validate its certificate, you now need to provide your client keys, -certificates and trusted CA: - -.. 
code-block:: bash - - $ docker --tlsverify --tlscacert=ca.pem --tlscert=client-cert.pem --tlskey=client-key.pem \ - -H=dns-name-of-docker-host:4243 - -.. warning:: - - As shown in the example above, you don't have to run the ``docker`` - client with ``sudo`` or the ``docker`` group when you use - certificate authentication. That means anyone with the keys can - give any instructions to your Docker daemon, giving them root - access to the machine hosting the daemon. Guard these keys as you - would a root password! - -Other modes ------------ -If you don't want to have complete two-way authentication, you can run Docker in -various other modes by mixing the flags. - -Daemon modes -~~~~~~~~~~~~ -- tlsverify, tlscacert, tlscert, tlskey set: Authenticate clients -- tls, tlscert, tlskey: Do not authenticate clients - -Client modes -~~~~~~~~~~~~ -- tls: Authenticate server based on public/default CA pool -- tlsverify, tlscacert: Authenticate server based on given CA -- tls, tlscert, tlskey: Authenticate with client certificate, do not authenticate - server based on given CA -- tlsverify, tlscacert, tlscert, tlskey: Authenticate with client certificate, - authenticate server based on given CA - -The client will send its client certificate if found, so you just need to drop -your keys into `~/.docker/.pem` diff --git a/docs/sources/examples/index.rst b/docs/sources/examples/index.rst deleted file mode 100644 index 94e2d917bb..0000000000 --- a/docs/sources/examples/index.rst +++ /dev/null @@ -1,30 +0,0 @@ -:title: Docker Examples -:description: Examples on how to use Docker -:keywords: docker, hello world, node, nodejs, python, couch, couchdb, redis, ssh, sshd, examples, postgresql, link - - -.. _example_list: - -Examples -======== - -Here are some examples of how to use Docker to create running -processes, starting from a very simple *Hello World* and progressing -to more substantial services like those which you might find in production. - -.. toctree:: - :maxdepth: 1 - - hello_world - nodejs_web_app - running_redis_service - running_ssh_service - couchdb_data_volumes - postgresql_service - mongodb - running_riak_service - using_supervisord - cfengine_process_management - python_web_app - apt-cacher-ng - https diff --git a/docs/sources/examples/mongodb.rst b/docs/sources/examples/mongodb.rst deleted file mode 100644 index 913dc2699a..0000000000 --- a/docs/sources/examples/mongodb.rst +++ /dev/null @@ -1,100 +0,0 @@ -:title: Building a Docker Image with MongoDB -:description: How to build a Docker image with MongoDB pre-installed -:keywords: docker, example, package installation, networking, mongodb - -.. _mongodb_image: - -Building an Image with MongoDB -============================== - -.. include:: example_header.inc - -The goal of this example is to show how you can build your own -Docker images with MongoDB pre-installed. We will do that by -constructing a ``Dockerfile`` that downloads a base image, adds an -apt source and installs the database software on Ubuntu. - -Creating a ``Dockerfile`` -+++++++++++++++++++++++++ - -Create an empty file called ``Dockerfile``: - -.. code-block:: bash - - touch Dockerfile - -Next, define the parent image you want to use to build your own image on top of. -Here, we’ll use `Ubuntu `_ (tag: ``latest``) -available on the `docker index `_: - -.. code-block:: bash - - FROM ubuntu:latest - -Since we want to be running the latest version of MongoDB we'll need to add the -10gen repo to our apt sources list. - -.. 
code-block:: bash - - # Add 10gen official apt source to the sources list - RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 - RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list - -Then, we don't want Ubuntu to complain about init not being available so we'll -divert ``/sbin/initctl`` to ``/bin/true`` so it thinks everything is working. - -.. code-block:: bash - - # Hack for initctl not being available in Ubuntu - RUN dpkg-divert --local --rename --add /sbin/initctl - RUN ln -sf /bin/true /sbin/initctl - -Afterwards we'll be able to update our apt repositories and install MongoDB: - -.. code-block:: bash - - # Install MongoDB - RUN apt-get update - RUN apt-get install mongodb-10gen - -To run MongoDB we'll have to create the default data directory (because we want it to -run without needing to provide a special configuration file): - -.. code-block:: bash - - # Create the MongoDB data directory - RUN mkdir -p /data/db - -Finally, we'll expose the standard port that MongoDB runs on, 27017, as well as -define an ``ENTRYPOINT`` instruction for the container. - -.. code-block:: bash - - EXPOSE 27017 - ENTRYPOINT ["/usr/bin/mongod"] - -Now, let's build the image, which will go through the ``Dockerfile`` we made and -run all of the commands. - -.. code-block:: bash - - sudo docker build -t /mongodb . - -Now you should be able to run ``mongod`` as a daemon and be able to connect on -the local port! - -.. code-block:: bash - - # Regular style - MONGO_ID=$(sudo docker run -P -d /mongodb) - - # Lean and mean - MONGO_ID=$(sudo docker run -P -d /mongodb --noprealloc --smallfiles) - - # Check the logs out - sudo docker logs $MONGO_ID - - # Connect and play around - mongo --port - -Sweet! diff --git a/docs/sources/examples/nodejs_web_app.rst b/docs/sources/examples/nodejs_web_app.rst deleted file mode 100644 index 55bd76db89..0000000000 --- a/docs/sources/examples/nodejs_web_app.rst +++ /dev/null @@ -1,239 +0,0 @@ -:title: Running a Node.js app on CentOS -:description: Installing and running a Node.js app on CentOS -:keywords: docker, example, package installation, node, centos - -.. _nodejs_web_app: - -Node.js Web App -=============== - -.. include:: example_header.inc - -The goal of this example is to show you how you can build your own -Docker images from a parent image using a ``Dockerfile``. We will do -that by making a simple Node.js hello world web application running on -CentOS. You can get the full source code at -https://github.com/gasi/docker-node-hello. - -Create Node.js app -++++++++++++++++++ - -First, create a directory ``src`` where all the files will live. Then create a ``package.json`` file that describes your app and its -dependencies: - -.. code-block:: json - - { - "name": "docker-centos-hello", - "private": true, - "version": "0.0.1", - "description": "Node.js Hello World app on CentOS using docker", - "author": "Daniel Gasienica ", - "dependencies": { - "express": "3.2.4" - } - } - -Then, create an ``index.js`` file that defines a web app using the -`Express.js `_ framework: - -.. code-block:: javascript - - var express = require('express'); - - // Constants - var PORT = 8080; - - // App - var app = express(); - app.get('/', function (req, res) { - res.send('Hello World\n'); - }); - - app.listen(PORT); - console.log('Running on http://localhost:' + PORT); - - -In the next steps, we’ll look at how you can run this app inside a CentOS -container using Docker.
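If you want to sanity-check the app outside of Docker before containerizing it, you can run it directly on your workstation. This is only a sketch and assumes Node.js and npm are already installed locally; none of it is required for the Docker build that follows.

.. code-block:: bash

    # Install the dependencies declared in package.json
    cd src
    npm install

    # Start the app (it listens on port 8080) ...
    node index.js

    # ... and, from another terminal, check the response
    curl http://localhost:8080
    # -> Hello World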
First, you’ll need to build a Docker image of your app. - -Creating a ``Dockerfile`` -+++++++++++++++++++++++++ - -Create an empty file called ``Dockerfile``: - -.. code-block:: bash - - touch Dockerfile - -Open the ``Dockerfile`` in your favorite text editor and add the following line -that defines the version of Docker the image requires to build -(this example uses Docker 0.3.4): - -.. code-block:: bash - - # DOCKER-VERSION 0.3.4 - -Next, define the parent image you want to use to build your own image on top of. -Here, we’ll use `CentOS `_ (tag: ``6.4``) -available on the `Docker index`_: - -.. code-block:: bash - - FROM centos:6.4 - -Since we’re building a Node.js app, you’ll have to install Node.js as well as -npm on your CentOS image. Node.js is required to run your app and npm to install -your app’s dependencies defined in ``package.json``. -To install the right package for CentOS, we’ll use the instructions from the -`Node.js wiki`_: - -.. code-block:: bash - - # Enable EPEL for Node.js - RUN rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm - # Install Node.js and npm - RUN yum install -y npm - -To bundle your app’s source code inside the Docker image, use the ``ADD`` -instruction: - -.. code-block:: bash - - # Bundle app source - ADD . /src - -Install your app dependencies using the ``npm`` binary: - -.. code-block:: bash - - # Install app dependencies - RUN cd /src; npm install - -Your app binds to port ``8080`` so you’ll use the ``EXPOSE`` instruction -to have it mapped by the ``docker`` daemon: - -.. code-block:: bash - - EXPOSE 8080 - -Last but not least, define the command to run your app using ``CMD`` -which defines your runtime, i.e. ``node``, and the path to our app, -i.e. ``src/index.js`` (see the step where we added the source to the -container): - -.. code-block:: bash - - CMD ["node", "/src/index.js"] - -Your ``Dockerfile`` should now look like this: - -.. code-block:: bash - - - # DOCKER-VERSION 0.3.4 - FROM centos:6.4 - - # Enable EPEL for Node.js - RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm - # Install Node.js and npm - RUN yum install -y npm - - # Bundle app source - ADD . /src - # Install app dependencies - RUN cd /src; npm install - - EXPOSE 8080 - CMD ["node", "/src/index.js"] - - -Building your image -+++++++++++++++++++ - -Go to the directory that has your ``Dockerfile`` and run the following -command to build a Docker image. The ``-t`` flag let’s you tag your -image so it’s easier to find later using the ``docker images`` -command: - -.. code-block:: bash - - sudo docker build -t /centos-node-hello . - -Your image will now be listed by Docker: - -.. code-block:: bash - - sudo docker images - - > # Example - > REPOSITORY TAG ID CREATED - > centos 6.4 539c0211cd76 8 weeks ago - > gasi/centos-node-hello latest d64d3505b0d2 2 hours ago - - -Run the image -+++++++++++++ - -Running your image with ``-d`` runs the container in detached mode, leaving the -container running in the background. The ``-p`` flag redirects a public port to a private port in the container. Run the image you previously built: - -.. code-block:: bash - - sudo docker run -p 49160:8080 -d /centos-node-hello - -Print the output of your app: - -.. code-block:: bash - - # Get container ID - sudo docker ps - - # Print app output - sudo docker logs - - > # Example - > Running on http://localhost:8080 - - -Test -++++ - -To test your app, get the the port of your app that Docker mapped: - -.. 
code-block:: bash - - sudo docker ps - - > # Example - > ID IMAGE COMMAND ... PORTS - > ecce33b30ebf gasi/centos-node-hello:latest node /src/index.js 49160->8080 - -In the example above, Docker mapped the ``8080`` port of the container to -``49160``. - -Now you can call your app using ``curl`` (install if needed via: -``sudo apt-get install curl``): - -.. code-block:: bash - - curl -i localhost:49160 - - > HTTP/1.1 200 OK - > X-Powered-By: Express - > Content-Type: text/html; charset=utf-8 - > Content-Length: 12 - > Date: Sun, 02 Jun 2013 03:53:22 GMT - > Connection: keep-alive - > - > Hello World - -We hope this tutorial helped you get up and running with Node.js and -CentOS on Docker. You can get the full source code at -https://github.com/gasi/docker-node-hello. - -Continue to :ref:`running_redis_service`. - - -.. _Node.js wiki: https://github.com/joyent/node/wiki/Installing-Node.js-via-package-manager#rhelcentosscientific-linux-6 -.. _docker index: https://index.docker.io/ diff --git a/docs/sources/examples/postgresql_service.rst b/docs/sources/examples/postgresql_service.rst deleted file mode 100644 index 488e1530b2..0000000000 --- a/docs/sources/examples/postgresql_service.rst +++ /dev/null @@ -1,117 +0,0 @@ -:title: PostgreSQL service How-To -:description: Running and installing a PostgreSQL service -:keywords: docker, example, package installation, postgresql - -.. _postgresql_service: - -PostgreSQL Service -================== - -.. include:: example_header.inc - -Installing PostgreSQL on Docker -------------------------------- - -Assuming there is no Docker image that suits your needs in `the index`_, you -can create one yourself. - -.. _the index: http://index.docker.io - -Start by creating a new Dockerfile: - -.. note:: - - This PostgreSQL setup is for development purposes only. Refer - to the PostgreSQL documentation to fine-tune these settings so that it - is suitably secure. - -.. literalinclude:: postgresql_service.Dockerfile - -Build an image from the Dockerfile and assign it a name. - -.. code-block:: bash - - $ sudo docker build -t eg_postgresql . - -And run the PostgreSQL server container (in the foreground): - -.. code-block:: bash - - $ sudo docker run --rm -P --name pg_test eg_postgresql - -There are 2 ways to connect to the PostgreSQL server. We can use -:ref:`working_with_links_names`, or we can access it from our host (or the network). - -.. note:: The ``--rm`` flag removes the container when the container - exits successfully. - -Using container linking -^^^^^^^^^^^^^^^^^^^^^^^ - -Containers can be linked to another container's ports directly using -``--link remote_name:local_alias`` in the client's ``docker run``. This will -set a number of environment variables that can then be used to connect: - -.. code-block:: bash - - $ sudo docker run --rm -t -i --link pg_test:pg eg_postgresql bash - - postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password - -Connecting from your host system -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Assuming you have the postgresql-client installed, you can use the host-mapped port -to test as well. You need to use ``docker ps`` to find out what local host port the -container is mapped to first: - -..
code-block:: bash - - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 5e24362f27f6 eg_postgresql:latest /usr/lib/postgresql/ About an hour ago Up About an hour 0.0.0.0:49153->5432/tcp pg_test - $ psql -h localhost -p 49153 -d docker -U docker --password - -Testing the database -^^^^^^^^^^^^^^^^^^^^ - -Once you have authenticated and have a ``docker =#`` prompt, you can -create a table and populate it. - -.. code-block:: bash - - psql (9.3.1) - Type "help" for help. - - docker=# CREATE TABLE cities ( - docker(# name varchar(80), - docker(# location point - docker(# ); - CREATE TABLE - docker=# INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)'); - INSERT 0 1 - docker=# select * from cities; - name | location - ---------------+----------- - San Francisco | (-194,53) - (1 row) - -Using the container volumes -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You can use the defined volumes to inspect the PostgreSQL log files and to backup your -configuration and data: - -.. code-block:: bash - - docker run --rm --volumes-from pg_test -t -i busybox sh - - / # ls - bin etc lib linuxrc mnt proc run sys usr - dev home lib64 media opt root sbin tmp var - / # ls /etc/postgresql/9.3/main/ - environment pg_hba.conf postgresql.conf - pg_ctl.conf pg_ident.conf start.conf - /tmp # ls /var/log - ldconfig postgresql - diff --git a/docs/sources/examples/python_web_app.rst b/docs/sources/examples/python_web_app.rst deleted file mode 100644 index 33c038f9ab..0000000000 --- a/docs/sources/examples/python_web_app.rst +++ /dev/null @@ -1,145 +0,0 @@ -:title: Python Web app example -:description: Building your own python web app using docker -:keywords: docker, example, python, web app - -.. _python_web_app: - -Python Web App -============== - -.. include:: example_header.inc - -While using Dockerfiles is the preferred way to create maintainable -and repeatable images, its useful to know how you can try things out -and then commit your live changes to an image. - -The goal of this example is to show you how you can modify your own -Docker images by making changes to a running -container, and then saving the results as a new image. We will do -that by making a simple 'hello world' Flask web application image. - -Download the initial image --------------------------- - -Download the ``shykes/pybuilder`` Docker image from the ``http://index.docker.io`` -registry. - -This image contains a ``buildapp`` script to download the web app and then ``pip install`` -any required modules, and a ``runapp`` script that finds the ``app.py`` and runs it. - -.. _`shykes/pybuilder`: https://github.com/shykes/pybuilder - -.. code-block:: bash - - $ sudo docker pull shykes/pybuilder - -.. note:: This container was built with a very old version of docker - (May 2013 - see `shykes/pybuilder`_ ), when the ``Dockerfile`` format was different, - but the image can still be used now. - -Interactively make some modifications -------------------------------------- - -We then start a new container running interactively using the image. -First, we set a ``URL`` variable that points to a tarball of a simple -helloflask web app, and then we run a command contained in the image called -``buildapp``, passing it the ``$URL`` variable. The container is -given a name ``pybuilder_run`` which we will use in the next steps. - -While this example is simple, you could run any number of interactive commands, -try things out, and then exit when you're done. - -.. 
code-block:: bash - - $ sudo docker run -i -t --name pybuilder_run shykes/pybuilder bash - - $$ URL=http://github.com/shykes/helloflask/archive/master.tar.gz - $$ /usr/local/bin/buildapp $URL - [...] - $$ exit - -Commit the container to create a new image ------------------------------------------- - -Save the changes we just made in the container to a new image called -``/builds/github.com/shykes/helloflask/master``. You now have 3 different -ways to refer to the container: name ``pybuilder_run``, short-id ``c8b2e8228f11``, or -long-id ``c8b2e8228f11b8b3e492cbf9a49923ae66496230056d61e07880dc74c5f495f9``. - -.. code-block:: bash - - $ sudo docker commit pybuilder_run /builds/github.com/shykes/helloflask/master - c8b2e8228f11b8b3e492cbf9a49923ae66496230056d61e07880dc74c5f495f9 - - -Run the new image to start the web worker ------------------------------------------ - -Use the new image to create a new container with -network port 5000 mapped to a local port - -.. code-block:: bash - - $ sudo docker run -d -p 5000 --name web_worker /builds/github.com/shykes/helloflask/master /usr/local/bin/runapp - - -- **"docker run -d "** run a command in a new container. We pass "-d" - so it runs as a daemon. -- **"-p 5000"** the web app is going to listen on this port, so it - must be mapped from the container to the host system. -- **/usr/local/bin/runapp** is the command which starts the web app. - - -View the container logs ------------------------ - -View the logs for the new ``web_worker`` container and -if everything worked as planned you should see the line ``Running on -http://0.0.0.0:5000/`` in the log output. - -To exit the view without stopping the container, hit Ctrl-C, or open another -terminal and continue with the example while watching the result in the logs. - -.. code-block:: bash - - $ sudo docker logs -f web_worker - * Running on http://0.0.0.0:5000/ - - -See the webapp output ---------------------- - -Look up the public-facing port which is NAT-ed. Find the private port -used by the container and store it inside of the ``WEB_PORT`` variable. - -Access the web app using the ``curl`` binary. If everything worked as planned you -should see the line ``Hello world!`` inside of your console. - -.. code-block:: bash - - $ WEB_PORT=$(sudo docker port web_worker 5000 | awk -F: '{ print $2 }') - - # install curl if necessary, then ... - $ curl http://127.0.0.1:$WEB_PORT - Hello world! - - -Clean up example containers and images --------------------------------------- - -.. code-block:: bash - - $ sudo docker ps --all - -List ``--all`` the Docker containers. If this container had already finished -running, it will still be listed here with a status of 'Exit 0'. - -.. code-block:: bash - - $ sudo docker stop web_worker - $ sudo docker rm web_worker pybuilder_run - $ sudo docker rmi /builds/github.com/shykes/helloflask/master shykes/pybuilder:latest - -And now stop the running web worker, and delete the containers, so that we can -then delete the images that we used. - diff --git a/docs/sources/examples/running_redis_service.rst b/docs/sources/examples/running_redis_service.rst deleted file mode 100644 index 5a5a1b003f..0000000000 --- a/docs/sources/examples/running_redis_service.rst +++ /dev/null @@ -1,101 +0,0 @@ -:title: Running a Redis service -:description: Installing and running an redis service -:keywords: docker, example, package installation, networking, redis - -.. _running_redis_service: - -Redis Service -============= - -.. 
include:: example_header.inc - -Very simple, no frills, Redis service attached to a web application using a link. - -Create a docker container for Redis ------------------------------------ - -Firstly, we create a ``Dockerfile`` for our new Redis image. - -.. code-block:: bash - - FROM debian:jessie - RUN apt-get update && apt-get install -y redis-server - EXPOSE 6379 - ENTRYPOINT ["/usr/bin/redis-server"] - CMD ["--bind", "0.0.0.0"] - -Next we build an image from our ``Dockerfile``. Replace ```` -with your own user name. - -.. code-block:: bash - - sudo docker build -t /redis . - -Run the service ---------------- - -Use the image we've just created and name your container ``redis``. - -Running the service with ``-d`` runs the container in detached mode, leaving the -container running in the background. - -Importantly, we're not exposing any ports on our container. Instead we're going to -use a container link to provide access to our Redis database. - -.. code-block:: bash - - sudo docker run --name redis -d /redis - -Create your web application container -------------------------------------- - -Next we can create a container for our application. We're going to use the ``--link`` -flag to create a link to the ``redis`` container we've just created with an alias of -``db``. This will create a secure tunnel to the ``redis`` container and expose the -Redis instance running inside that container to only this container. - -.. code-block:: bash - - sudo docker run --link redis:db -i -t ubuntu:12.10 /bin/bash - -Once inside our freshly created container we need to install Redis to get the -``redis-cli`` binary to test our connection. - -.. code-block:: bash - - apt-get update - apt-get -y install redis-server - service redis-server stop - -As we've used the ``--link redis:db`` option, Docker has created some environment -variables in our web application container. - -.. code-block:: bash - - env | grep DB_ - - # Should return something similar to this with your values - DB_NAME=/violet_wolf/db - DB_PORT_6379_TCP_PORT=6379 - DB_PORT=tcp://172.17.0.33:6379 - DB_PORT_6379_TCP=tcp://172.17.0.33:6379 - DB_PORT_6379_TCP_ADDR=172.17.0.33 - DB_PORT_6379_TCP_PROTO=tcp - -We can see that we've got a small list of environment variables prefixed with ``DB``. -The ``DB`` comes from the link alias specified when we launched the container. Let's use -the ``DB_PORT_6379_TCP_ADDR`` variable to connect to our Redis container. - -.. code-block:: bash - - redis-cli -h $DB_PORT_6379_TCP_ADDR - redis 172.17.0.33:6379> - redis 172.17.0.33:6379> set docker awesome - OK - redis 172.17.0.33:6379> get docker - "awesome" - redis 172.17.0.33:6379> exit - -We could easily use this or other environment variables in our web application to make a -connection to our ``redis`` container. - diff --git a/docs/sources/examples/running_riak_service.rst b/docs/sources/examples/running_riak_service.rst deleted file mode 100644 index 55e5e405c9..0000000000 --- a/docs/sources/examples/running_riak_service.rst +++ /dev/null @@ -1,151 +0,0 @@ -:title: Running a Riak service -:description: Build a Docker image with Riak pre-installed -:keywords: docker, example, package installation, networking, riak - -Riak Service -============================== - -.. include:: example_header.inc - -The goal of this example is to show you how to build a Docker image with Riak -pre-installed. - -Creating a ``Dockerfile`` -+++++++++++++++++++++++++ - -Create an empty file called ``Dockerfile``: - -.. 
code-block:: bash - - touch Dockerfile - -Next, define the parent image you want to use to build your image on top of. -We’ll use `Ubuntu `_ (tag: ``latest``), -which is available on the `docker index `_: - -.. code-block:: bash - - # Riak - # - # VERSION 0.1.0 - - # Use the Ubuntu base image provided by dotCloud - FROM ubuntu:latest - MAINTAINER Hector Castro hector@basho.com - -Next, we update the APT cache and apply any updates: - -.. code-block:: bash - - # Update the APT cache - RUN sed -i.bak 's/main$/main universe/' /etc/apt/sources.list - RUN apt-get update - RUN apt-get upgrade -y - -After that, we install and setup a few dependencies: - -- ``curl`` is used to download Basho's APT repository key -- ``lsb-release`` helps us derive the Ubuntu release codename -- ``openssh-server`` allows us to login to containers remotely and join Riak - nodes to form a cluster -- ``supervisor`` is used manage the OpenSSH and Riak processes - -.. code-block:: bash - - # Install and setup project dependencies - RUN apt-get install -y curl lsb-release supervisor openssh-server - - RUN mkdir -p /var/run/sshd - RUN mkdir -p /var/log/supervisor - - RUN locale-gen en_US en_US.UTF-8 - - ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf - - RUN echo 'root:basho' | chpasswd - -Next, we add Basho's APT repository: - -.. code-block:: bash - - RUN curl -s http://apt.basho.com/gpg/basho.apt.key | apt-key add -- - RUN echo "deb http://apt.basho.com $(lsb_release -cs) main" > /etc/apt/sources.list.d/basho.list - RUN apt-get update - -After that, we install Riak and alter a few defaults: - -.. code-block:: bash - - # Install Riak and prepare it to run - RUN apt-get install -y riak - RUN sed -i.bak 's/127.0.0.1/0.0.0.0/' /etc/riak/app.config - RUN echo "ulimit -n 4096" >> /etc/default/riak - -Almost there. Next, we add a hack to get us by the lack of ``initctl``: - -.. code-block:: bash - - # Hack for initctl - # See: https://github.com/dotcloud/docker/issues/1024 - RUN dpkg-divert --local --rename --add /sbin/initctl - RUN ln -sf /bin/true /sbin/initctl - -Then, we expose the Riak Protocol Buffers and HTTP interfaces, along with SSH: - -.. code-block:: bash - - # Expose Riak Protocol Buffers and HTTP interfaces, along with SSH - EXPOSE 8087 8098 22 - -Finally, run ``supervisord`` so that Riak and OpenSSH are started: - -.. code-block:: bash - - CMD ["/usr/bin/supervisord"] - -Create a ``supervisord`` configuration file -+++++++++++++++++++++++++++++++++++++++++++ - -Create an empty file called ``supervisord.conf``. Make sure it's at the same -directory level as your ``Dockerfile``: - -.. code-block:: bash - - touch supervisord.conf - -Populate it with the following program definitions: - -.. code-block:: bash - - [supervisord] - nodaemon=true - - [program:sshd] - command=/usr/sbin/sshd -D - stdout_logfile=/var/log/supervisor/%(program_name)s.log - stderr_logfile=/var/log/supervisor/%(program_name)s.log - autorestart=true - - [program:riak] - command=bash -c ". /etc/default/riak && /usr/sbin/riak console" - pidfile=/var/log/riak/riak.pid - stdout_logfile=/var/log/supervisor/%(program_name)s.log - stderr_logfile=/var/log/supervisor/%(program_name)s.log - -Build the Docker image for Riak -+++++++++++++++++++++++++++++++ - -Now you should be able to build a Docker image for Riak: - -.. code-block:: bash - - docker build -t "/riak" . - -Next steps -++++++++++ - -Riak is a distributed database. Many production deployments consist of `at -least five nodes `_. 
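Before moving on to a multi-node setup, you can smoke-test the image you just built with a single container. This is only a sketch: ``<yourname>/riak`` stands for whatever tag you used in the ``docker build`` step above, and the mapped host port is discovered at run time.

.. code-block:: bash

    # Start one Riak node from the image built above
    sudo docker run -d -P --name riak01 <yourname>/riak

    # Find the host port mapped to Riak's HTTP interface (8098)
    sudo docker port riak01 8098

    # Ping the node over HTTP (assumes curl is installed on the host)
    curl http://localhost:$(sudo docker port riak01 8098 | grep -Po '\d+$')/ping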
See the `docker-riak `_ project details on how to deploy a Riak cluster using Docker and -Pipework. diff --git a/docs/sources/examples/running_ssh_service.rst b/docs/sources/examples/running_ssh_service.rst deleted file mode 100644 index 4161275019..0000000000 --- a/docs/sources/examples/running_ssh_service.rst +++ /dev/null @@ -1,49 +0,0 @@ -:title: Running an SSH service -:description: Installing and running an sshd service -:keywords: docker, example, package installation, networking - -.. _running_ssh_service: - -SSH Daemon Service -================== - -.. include:: example_header.inc - -The following Dockerfile sets up an sshd service in a container that you can use -to connect to and inspect other container's volumes, or to get quick access to a -test container. - -.. literalinclude:: running_ssh_service.Dockerfile - -Build the image using: - -.. code-block:: bash - - $ sudo docker build -t eg_sshd . - -Then run it. You can then use ``docker port`` to find out what host port the container's -port 22 is mapped to: - -.. code-block:: bash - - $ sudo docker run -d -P --name test_sshd eg_sshd - $ sudo docker port test_sshd 22 - 0.0.0.0:49154 - -And now you can ssh to port ``49154`` on the Docker daemon's host IP address -(``ip address`` or ``ifconfig`` can tell you that): - -.. code-block:: bash - - $ ssh root@192.168.1.2 -p 49154 - # The password is ``screencast``. - $$ - -Finally, clean up after your test by stopping and removing the container, and -then removing the image. - -.. code-block:: bash - - $ sudo docker stop test_sshd - $ sudo docker rm test_sshd - $ sudo docker rmi eg_sshd diff --git a/docs/sources/examples/using_supervisord.rst b/docs/sources/examples/using_supervisord.rst deleted file mode 100644 index 750b6c2334..0000000000 --- a/docs/sources/examples/using_supervisord.rst +++ /dev/null @@ -1,128 +0,0 @@ -:title: Using Supervisor with Docker -:description: How to use Supervisor process management with Docker -:keywords: docker, supervisor, process management - -.. _using_supervisord: - -Using Supervisor with Docker -============================ - -.. include:: example_header.inc - -Traditionally a Docker container runs a single process when it is launched, for -example an Apache daemon or a SSH server daemon. Often though you want to run -more than one process in a container. There are a number of ways you can -achieve this ranging from using a simple Bash script as the value of your -container's ``CMD`` instruction to installing a process management tool. - -In this example we're going to make use of the process management tool, -`Supervisor `_, to manage multiple processes in our -container. Using Supervisor allows us to better control, manage, and restart the -processes we want to run. To demonstrate this we're going to install and manage both an -SSH daemon and an Apache daemon. - -Creating a Dockerfile ---------------------- - -Let's start by creating a basic ``Dockerfile`` for our new image. - -.. code-block:: bash - - FROM ubuntu:latest - MAINTAINER examples@docker.io - RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list - RUN apt-get update - RUN apt-get upgrade -y - -Installing Supervisor ---------------------- - -We can now install our SSH and Apache daemons as well as Supervisor in our container. - -.. 
code-block:: bash - - RUN apt-get install -y openssh-server apache2 supervisor - RUN mkdir -p /var/run/sshd - RUN mkdir -p /var/log/supervisor - -Here we're installing the ``openssh-server``, ``apache2`` and ``supervisor`` -(which provides the Supervisor daemon) packages. We're also creating two new -directories that are needed to run our SSH daemon and Supervisor. - -Adding Supervisor's configuration file --------------------------------------- - -Now let's add a configuration file for Supervisor. The default file is called -``supervisord.conf`` and is located in ``/etc/supervisor/conf.d/``. - -.. code-block:: bash - - ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf - -Let's see what is inside our ``supervisord.conf`` file. - -.. code-block:: bash - - [supervisord] - nodaemon=true - - [program:sshd] - command=/usr/sbin/sshd -D - - [program:apache2] - command=/bin/bash -c "source /etc/apache2/envvars && exec /usr/sbin/apache2 -DFOREGROUND" - -The ``supervisord.conf`` configuration file contains directives that configure -Supervisor and the processes it manages. The first block ``[supervisord]`` -provides configuration for Supervisor itself. We're using one directive, -``nodaemon`` which tells Supervisor to run interactively rather than daemonize. - -The next two blocks manage the services we wish to control. Each block controls -a separate process. The blocks contain a single directive, ``command``, which -specifies what command to run to start each process. - -Exposing ports and running Supervisor -------------------------------------- - -Now let's finish our ``Dockerfile`` by exposing some required ports and -specifying the ``CMD`` instruction to start Supervisor when our container -launches. - -.. code-block:: bash - - EXPOSE 22 80 - CMD ["/usr/bin/supervisord"] - -Here we've exposed ports 22 and 80 on the container and we're running the -``/usr/bin/supervisord`` binary when the container launches. - -Building our container ----------------------- - -We can now build our new container. - -.. code-block:: bash - - sudo docker build -t /supervisord . - -Running our Supervisor container --------------------------------- - -Once we've got a built image we can launch a container from it. - -.. code-block:: bash - - sudo docker run -p 22 -p 80 -t -i /supervisord - 2013-11-25 18:53:22,312 CRIT Supervisor running as root (no user in config file) - 2013-11-25 18:53:22,312 WARN Included extra file "/etc/supervisor/conf.d/supervisord.conf" during parsing - 2013-11-25 18:53:22,342 INFO supervisord started with pid 1 - 2013-11-25 18:53:23,346 INFO spawned: 'sshd' with pid 6 - 2013-11-25 18:53:23,349 INFO spawned: 'apache2' with pid 7 - . . . - -We've launched a new container interactively using the ``docker run`` command. -That container has run Supervisor and launched the SSH and Apache daemons with -it. We've specified the ``-p`` flag to expose ports 22 and 80. From here we can -now identify the exposed ports and connect to one or both of the SSH and Apache -daemons. - diff --git a/docs/sources/faq.rst b/docs/sources/faq.rst deleted file mode 100644 index 07055941bd..0000000000 --- a/docs/sources/faq.rst +++ /dev/null @@ -1,224 +0,0 @@ -:title: FAQ -:description: Most frequently asked questions. -:keywords: faq, questions, documentation, docker - -FAQ -=== - - -Most frequently asked questions. --------------------------------- - -How much does Docker cost? -.......................... - - Docker is 100% free, it is open source, so you can use it without paying. 
- -What open source license are you using? -....................................... - - We are using the Apache License Version 2.0, see it here: - https://github.com/dotcloud/docker/blob/master/LICENSE - -Does Docker run on Mac OS X or Windows? -....................................... - - Not at this time, Docker currently only runs on Linux, but you can - use VirtualBox to run Docker in a virtual machine on your box, and - get the best of both worlds. Check out the :ref:`macosx` and - :ref:`windows` installation guides. The small Linux distribution boot2docker - can be run inside virtual machines on these two operating systems. - -How do containers compare to virtual machines? -.............................................. - - They are complementary. VMs are best used to allocate chunks of - hardware resources. Containers operate at the process level, which - makes them very lightweight and perfect as a unit of software - delivery. - -What does Docker add to just plain LXC? -....................................... - - Docker is not a replacement for LXC. "LXC" refers to capabilities - of the Linux kernel (specifically namespaces and control groups) - which allow sandboxing processes from one another, and controlling - their resource allocations. On top of this low-level foundation of - kernel features, Docker offers a high-level tool with several - powerful functionalities: - - * *Portable deployment across machines.* - Docker defines a format for bundling an application and all its - dependencies into a single object which can be transferred to - any Docker-enabled machine, and executed there with the - guarantee that the execution environment exposed to the - application will be the same. LXC implements process sandboxing, - which is an important pre-requisite for portable deployment, but - that alone is not enough for portable deployment. If you sent me - a copy of your application installed in a custom LXC - configuration, it would almost certainly not run on my machine - the way it does on yours, because it is tied to your machine's - specific configuration: networking, storage, logging, distro, - etc. Docker defines an abstraction for these machine-specific - settings, so that the exact same Docker container can run - - unchanged - on many different machines, with many different - configurations. - - * *Application-centric.* - Docker is optimized for the deployment of applications, as - opposed to machines. This is reflected in its API, user - interface, design philosophy and documentation. By contrast, the - ``lxc`` helper scripts focus on containers as lightweight - machines - basically servers that boot faster and need less - RAM. We think there's more to containers than just that. - - * *Automatic build.* - Docker includes :ref:`a tool for developers to automatically - assemble a container from their source code `, - with full control over application dependencies, build tools, - packaging etc. They are free to use ``make, maven, chef, puppet, - salt,`` Debian packages, RPMs, source tarballs, or any - combination of the above, regardless of the configuration of the - machines. - - * *Versioning.* - Docker includes git-like capabilities for tracking successive - versions of a container, inspecting the diff between versions, - committing new versions, rolling back etc. The history also - includes how a container was assembled and by whom, so you get - full traceability from the production server all the way back to - the upstream developer. 
Docker also implements incremental - uploads and downloads, similar to ``git pull``, so new versions - of a container can be transferred by only sending diffs. - - * *Component re-use.* - Any container can be used as a :ref:`"base image" - ` to create more specialized components. This - can be done manually or as part of an automated build. For - example, you can prepare the ideal Python environment, and use it - as a base for 10 different applications. Your ideal PostgreSQL - setup can be re-used for all your future projects. And so on. - - * *Sharing.* - Docker has access to a `public registry - `_ where thousands of people have - uploaded useful containers: anything from Redis, CouchDB, - Postgres to IRC bouncers to Rails app servers to Hadoop to base - images for various Linux distros. The :ref:`registry - ` also includes an official "standard - library" of useful containers maintained by the Docker team. The - registry itself is open-source, so anyone can deploy their own - registry to store and transfer private containers, for internal - server deployments for example. - - * *Tool ecosystem.* - Docker defines an API for automating and customizing the - creation and deployment of containers. There are a huge number - of tools integrating with Docker to extend its - capabilities. PaaS-like deployment (Dokku, Deis, Flynn), - multi-node orchestration (Maestro, Salt, Mesos, Openstack Nova), - management dashboards (docker-ui, Openstack Horizon, Shipyard), - configuration management (Chef, Puppet), continuous integration - (Jenkins, Strider, Travis), etc. Docker is rapidly establishing - itself as the standard for container-based tooling. - -What is different between a Docker container and a VM? -...................................................... - -There's a great StackOverflow answer `showing the differences `_. - -Do I lose my data when the container exits? -........................................... - -Not at all! Any data that your application writes to disk gets preserved -in its container until you explicitly delete the container. The file -system for the container persists even after the container halts. - -How far do Docker containers scale? -................................... - -Some of the largest server farms in the world today are based on containers. -Large web deployments like Google and Twitter, and platform providers such as -Heroku and dotCloud all run on container technology, at a scale of hundreds of -thousands or even millions of containers running in parallel. - -How do I connect Docker containers? -................................... - -Currently the recommended way to link containers is via the `link` primitive. -You can see details of how to `work with links here -`_. - -Also useful when enabling more flexible service portability is the -`Ambassador linking pattern -`_. - -How do I run more than one process in a Docker container? -......................................................... - -Any capable process supervisor such as http://supervisord.org/, runit, s6, or -daemontools can do the trick. Docker will start up the process management -daemon which will then fork to run additional processes. As long as the -process manager daemon continues to run, the container will continue to run as -well. You can see a more substantial example `that uses supervisord here -`_. - -What platforms does Docker run on? -..................................
- -Linux: - -- Ubuntu 12.04, 13.04 et al -- Fedora 19/20+ -- RHEL 6.5+ -- Centos 6+ -- Gentoo -- ArchLinux -- openSUSE 12.3+ -- CRUX 3.0+ - -Cloud: - -- Amazon EC2 -- Google Compute Engine -- Rackspace - -How do I report a security issue with Docker? -............................................. - -You can learn about the project's security policy `here `_ -and report security issues to this `mailbox `_. - -Why do I need to sign my commits to Docker with the DCO? -........................................................ - -Please read `our blog post `_ on the introduction of the DCO. - -Can I help by adding some questions and answers? -................................................ - -Definitely! You can fork `the repo`_ and edit the documentation sources. - - -Where can I find more answers? -.............................. - - You can find more answers on: - - * `Docker user mailinglist`_ - * `Docker developer mailinglist`_ - * `IRC, docker on freenode`_ - * `GitHub`_ - * `Ask questions on Stackoverflow`_ - * `Join the conversation on Twitter`_ - - - .. _Docker user mailinglist: https://groups.google.com/d/forum/docker-user - .. _Docker developer mailinglist: https://groups.google.com/d/forum/docker-dev - .. _the repo: http://www.github.com/dotcloud/docker - .. _IRC, docker on freenode: irc://chat.freenode.net#docker - .. _Github: http://www.github.com/dotcloud/docker - .. _Ask questions on Stackoverflow: http://stackoverflow.com/search?q=docker - .. _Join the conversation on Twitter: http://twitter.com/docker - -Looking for something else to read? Checkout the :ref:`hello_world` example. diff --git a/docs/sources/installation/amazon.rst b/docs/sources/installation/amazon.rst deleted file mode 100644 index b062a15e1e..0000000000 --- a/docs/sources/installation/amazon.rst +++ /dev/null @@ -1,107 +0,0 @@ -:title: Installation on Amazon EC2 -:description: Please note this project is currently under heavy development. It should not be used in production. -:keywords: amazon ec2, virtualization, cloud, docker, documentation, installation - -Amazon EC2 -========== - -.. include:: install_header.inc - -There are several ways to install Docker on AWS EC2: - -* :ref:`amazonquickstart_new` or -* :ref:`amazonquickstart` or -* :ref:`amazonstandard` - -**You'll need an** `AWS account `_ **first, of course.** - -.. _amazonquickstart: - -Amazon QuickStart ------------------ - -1. **Choose an image:** - - * Launch the `Create Instance Wizard - `_ menu - on your AWS Console. - - * Click the ``Select`` button for a 64Bit Ubuntu image. For example: Ubuntu Server 12.04.3 LTS - - * For testing you can use the default (possibly free) - ``t1.micro`` instance (more info on `pricing - `_). - - * Click the ``Next: Configure Instance Details`` button at the bottom right. - -2. **Tell CloudInit to install Docker:** - - * When you're on the "Configure Instance Details" step, expand the "Advanced - Details" section. - - * Under "User data", select "As text". - - * Enter ``#include https://get.docker.io`` into the instance *User Data*. - `CloudInit `_ is part of the - Ubuntu image you chose; it will bootstrap Docker by running the shell - script located at this URL. - -3. After a few more standard choices where defaults are probably ok, your AWS - Ubuntu instance with Docker should be running! 
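If you would rather script the launch than click through the console wizard, the same QuickStart can be approximated with the AWS CLI. This is only a sketch: the AMI ID, key pair and security group names below are placeholders to replace with your own, and the user data is the same CloudInit directive entered in step 2.

.. code-block:: bash

    # Save the CloudInit directive from step 2 to a file
    echo '#include https://get.docker.io' > user-data.txt

    # Launch a 64-bit Ubuntu instance with that user data
    aws ec2 run-instances \
        --image-id ami-xxxxxxxx \
        --instance-type t1.micro \
        --key-name my-keypair \
        --security-groups my-ssh-access \
        --user-data file://user-data.txt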
- -**If this is your first AWS instance, you may need to set up your -Security Group to allow SSH.** By default all incoming ports to your -new instance will be blocked by the AWS Security Group, so you might -just get timeouts when you try to connect. - -Installing with ``get.docker.io`` (as above) will create a service named -``lxc-docker``. It will also set up a :ref:`docker group ` and you -may want to add the *ubuntu* user to it so that you don't have to use ``sudo`` -for every Docker command. - -Once you've got Docker installed, you're ready to try it out -- head -on over to the :doc:`../use/basics` or :doc:`../examples/index` section. - -.. _amazonquickstart_new: - -Amazon QuickStart (Release Candidate - March 2014) --------------------------------------------------- - -Amazon just published new Docker-ready AMIs (2014.03 Release Candidate). Docker packages -can now be installed from Amazon's provided Software Repository. - -1. **Choose an image:** - - * Launch the `Create Instance Wizard - `_ menu - on your AWS Console. - - * Click the ``Community AMI`` menu option on the left side - - * Search for '2014.03' and select one of the Amazon provided AMI, for example ``amzn-ami-pv-2014.03.rc-0.x86_64-ebs`` - - * For testing you can use the default (possibly free) - ``t1.micro`` instance (more info on `pricing - `_). - - * Click the ``Next: Configure Instance Details`` button at the bottom right. - -2. After a few more standard choices where defaults are probably ok, your Amazon - Linux instance should be running! - -3. SSH to your instance to install Docker : ``ssh -i ec2-user@`` - -4. Once connected to the instance, type ``sudo yum install -y docker ; sudo service docker start`` to install and start Docker - -.. _amazonstandard: - -Standard Ubuntu Installation ----------------------------- - -If you want a more hands-on installation, then you can follow the -:ref:`ubuntu_linux` instructions installing Docker on any EC2 instance -running Ubuntu. Just follow Step 1 from :ref:`amazonquickstart` to -pick an image (or use one of your own) and skip the step with the -*User Data*. Then continue with the :ref:`ubuntu_linux` instructions. - -Continue with the :ref:`hello_world` example. diff --git a/docs/sources/installation/archlinux.rst b/docs/sources/installation/archlinux.rst deleted file mode 100644 index c9b4c1d2c5..0000000000 --- a/docs/sources/installation/archlinux.rst +++ /dev/null @@ -1,73 +0,0 @@ -:title: Installation on Arch Linux -:description: Please note this project is currently under heavy development. It should not be used in production. -:keywords: arch linux, virtualization, docker, documentation, installation - -.. _arch_linux: - -Arch Linux -========== - -.. include:: install_header.inc - -.. include:: install_unofficial.inc - -Installing on Arch Linux can be handled via the package in community: - -* `docker `_ - -or the following AUR package: - -* `docker-git `_ - -The docker package will install the latest tagged version of docker. -The docker-git package will build from the current master branch. - -Dependencies ------------- - -Docker depends on several packages which are specified as dependencies in -the packages. The core dependencies are: - -* bridge-utils -* device-mapper -* iproute2 -* lxc -* sqlite - - -Installation ------------- - -For the normal package a simple -:: - - pacman -S docker - -is all that is needed. - -For the AUR package execute: -:: - - yaourt -S docker-git - -The instructions here assume **yaourt** is installed. 
See -`Arch User Repository `_ -for information on building and installing packages from the AUR if you have not -done so before. - - -Starting Docker ---------------- - -There is a systemd service unit created for docker. To start the docker service: - -:: - - sudo systemctl start docker - - -To start on system boot: - -:: - - sudo systemctl enable docker diff --git a/docs/sources/installation/binaries.rst b/docs/sources/installation/binaries.rst deleted file mode 100644 index 9fa880b364..0000000000 --- a/docs/sources/installation/binaries.rst +++ /dev/null @@ -1,123 +0,0 @@ -:title: Installation from Binaries -:description: This instruction set is meant for hackers who want to try out Docker on a variety of environments. -:keywords: binaries, installation, docker, documentation, linux - -.. _binaries: - -Binaries -======== - -.. include:: install_header.inc - -**This instruction set is meant for hackers who want to try out Docker -on a variety of environments.** - -Before following these directions, you should really check if a -packaged version of Docker is already available for your distribution. -We have packages for many distributions, and more keep showing up all -the time! - - -Check runtime dependencies --------------------------- - -.. DOC COMMENT: this should be kept in sync with - https://github.com/dotcloud/docker/blob/master/hack/PACKAGERS.md#runtime-dependencies - -To run properly, docker needs the following software to be installed at runtime: - -- iptables version 1.4 or later -- Git version 1.7 or later -- procps (or similar provider of a "ps" executable) -- XZ Utils 4.9 or later -- a `properly mounted - `_ - cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point `is - `_ `not - `_ `sufficient - `_) - - -Check kernel dependencies -------------------------- - -Docker in daemon mode has specific kernel requirements. For details, -check your distribution in :ref:`installation_list`. - -In general, a 3.8 Linux kernel (or higher) is preferred, as some of the -prior versions have known issues that are triggered by Docker. - -Note that Docker also has a client mode, which can run on virtually -any Linux kernel (it even builds on OSX!). - - -Get the docker binary: ----------------------- - -.. code-block:: bash - - wget https://get.docker.io/builds/Linux/x86_64/docker-latest -O docker - chmod +x docker - -.. note:: - If you have trouble downloading the binary, you can also get the smaller - compressed release file: https://get.docker.io/builds/Linux/x86_64/docker-latest.tgz - -Run the docker daemon ---------------------- - -.. code-block:: bash - - # start the docker in daemon mode from the directory you unpacked - sudo ./docker -d & - - -.. _dockergroup: - -Giving non-root access ----------------------- - -The ``docker`` daemon always runs as the root user, and since Docker -version 0.5.2, the ``docker`` daemon binds to a Unix socket instead of -a TCP port. By default that Unix socket is owned by the user *root*, -and so, by default, you can access it with ``sudo``. - -Starting in version 0.5.3, if you (or your Docker installer) create a -Unix group called *docker* and add users to it, then the ``docker`` -daemon will make the ownership of the Unix socket read/writable by the -*docker* group when the daemon starts. The ``docker`` daemon must -always run as the root user, but if you run the ``docker`` client as a -user in the *docker* group then you don't need to add ``sudo`` to all -the client commands. - -.. 
warning:: The *docker* group (or the group specified with ``-G``) is - root-equivalent; see :ref:`dockersecurity_daemon` details. - - -Upgrades --------- - -To upgrade your manual installation of Docker, first kill the docker -daemon: - -.. code-block:: bash - - killall docker - -Then follow the regular installation steps. - - -Run your first container! -------------------------- - -.. code-block:: bash - - # check your docker version - sudo ./docker version - - # run a container and open an interactive shell in the container - sudo ./docker run -i -t ubuntu /bin/bash - - - -Continue with the :ref:`hello_world` example. diff --git a/docs/sources/installation/cruxlinux.rst b/docs/sources/installation/cruxlinux.rst deleted file mode 100644 index d1970cd1bf..0000000000 --- a/docs/sources/installation/cruxlinux.rst +++ /dev/null @@ -1,98 +0,0 @@ -:title: Installation on CRUX Linux -:description: Docker installation on CRUX Linux. -:keywords: crux linux, virtualization, Docker, documentation, installation - -.. _crux_linux: - - -CRUX Linux -========== - -.. include:: install_header.inc - -.. include:: install_unofficial.inc - -Installing on CRUX Linux can be handled via the ports from `James Mills `_: - -* `docker `_ - -* `docker-bin `_ - -* `docker-git `_ - -The ``docker`` port will install the latest tagged version of Docker. -The ``docker-bin`` port will install the latest tagged versin of Docker from upstream built binaries. -The ``docker-git`` package will build from the current master branch. - - -Installation ------------- - -For the time being (*until the CRUX Docker port(s) get into the official contrib repository*) you will need to install -`James Mills' `_ ports repository. You can do so via: - -Download the ``httpup`` file to ``/etc/ports/``: -:: - - curl -q -o - http://crux.nu/portdb/?a=getup&q=prologic > /etc/ports/prologic.httpup - - -Add ``prtdir /usr/ports/prologic`` to ``/etc/prt-get.conf``: -:: - - vim /etc/prt-get.conf - - # or: - echo "prtdir /usr/ports/prologic" >> /etc/prt-get.conf - - -Update ports and prt-get cache: -:: - - ports -u - prt-get cache - - -To install (*and its dependencies*): -:: - - prt-get depinst docker - - -Use ``docker-bin`` for the upstream binary or ``docker-git`` to build and install from the master branch from git. - - -Kernel Requirements -------------------- - -To have a working **CRUX+Docker** Host you must ensure your Kernel -has the necessary modules enabled for LXC containers to function -correctly and Docker Daemon to work properly. - -Please read the ``README.rst``: -:: - - prt-get readme docker - -There is a ``test_kernel_config.sh`` script in the above ports which you can use to test your Kernel configuration: - -:: - - cd /usr/ports/prologic/docker - ./test_kernel_config.sh /usr/src/linux/.config - - -Starting Docker ---------------- - -There is a rc script created for Docker. To start the Docker service: - -:: - - sudo su - - /etc/rc.d/docker start - -To start on system boot: - -- Edit ``/etc/rc.conf`` -- Put ``docker`` into the ``SERVICES=(...)`` array after ``net``. diff --git a/docs/sources/installation/fedora.rst b/docs/sources/installation/fedora.rst deleted file mode 100644 index 3b95f04f7f..0000000000 --- a/docs/sources/installation/fedora.rst +++ /dev/null @@ -1,75 +0,0 @@ -:title: Installation on Fedora -:description: Please note this project is currently under heavy development. It should not be used in production. 
-:keywords: Docker, Docker documentation, Fedora, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux - -.. _fedora: - -Fedora -====== - -.. include:: install_header.inc - -.. include:: install_unofficial.inc - -Docker is available in **Fedora 19 and later**. Please note that due to the -current Docker limitations Docker is able to run only on the **64 bit** -architecture. - -Installation ------------- - -The ``docker-io`` package provides Docker on Fedora. - - -If you have the (unrelated) ``docker`` package installed already, it will -conflict with ``docker-io``. There's a `bug report`_ filed for it. -To proceed with ``docker-io`` installation on Fedora 19 or Fedora 20, please -remove ``docker`` first. - -.. code-block:: bash - - sudo yum -y remove docker - -For Fedora 21 and later, the ``wmdocker`` package will provide the same -functionality as the old ``docker`` and will also not conflict with ``docker-io``. - -.. code-block:: bash - - sudo yum -y install wmdocker - sudo yum -y remove docker - -Install the ``docker-io`` package which will install Docker on our host. - -.. code-block:: bash - - sudo yum -y install docker-io - - -To update the ``docker-io`` package: - -.. code-block:: bash - - sudo yum -y update docker-io - -Now that it's installed, let's start the Docker daemon. - -.. code-block:: bash - - sudo systemctl start docker - -If we want Docker to start at boot, we should also: - -.. code-block:: bash - - sudo systemctl enable docker - -Now let's verify that Docker is working. - -.. code-block:: bash - - sudo docker run -i -t fedora /bin/bash - -**Done!**, now continue with the :ref:`hello_world` example. - -.. _bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1043676 - diff --git a/docs/sources/installation/frugalware.rst b/docs/sources/installation/frugalware.rst deleted file mode 100644 index ed9bb2bfaa..0000000000 --- a/docs/sources/installation/frugalware.rst +++ /dev/null @@ -1,62 +0,0 @@ -:title: Installation on FrugalWare -:description: Please note this project is currently under heavy development. It should not be used in production. -:keywords: frugalware linux, virtualization, docker, documentation, installation - -.. _frugalware: - -FrugalWare -========== - -.. include:: install_header.inc - -.. include:: install_unofficial.inc - -Installing on FrugalWare is handled via the official packages: - -* `lxc-docker i686 `_ - -* `lxc-docker x86_64 `_ - -The `lxc-docker` package will install the latest tagged version of Docker. - -Dependencies ------------- - -Docker depends on several packages which are specified as dependencies in -the packages. The core dependencies are: - -* systemd -* lvm2 -* sqlite3 -* libguestfs -* lxc -* iproute2 -* bridge-utils - - -Installation ------------- - -A simple -:: - - pacman -S lxc-docker - -is all that is needed. - - -Starting Docker ---------------- - -There is a systemd service unit created for Docker. To start Docker as service: - -:: - - sudo systemctl start lxc-docker - - -To start on system boot: - -:: - - sudo systemctl enable lxc-docker diff --git a/docs/sources/installation/gentoolinux.rst b/docs/sources/installation/gentoolinux.rst deleted file mode 100644 index 5abfddeb91..0000000000 --- a/docs/sources/installation/gentoolinux.rst +++ /dev/null @@ -1,84 +0,0 @@ -:title: Installation on Gentoo -:description: Please note this project is currently under heavy development. It should not be used in production. -:keywords: gentoo linux, virtualization, docker, documentation, installation - -.. 
_gentoo_linux: - -Gentoo -====== - -.. include:: install_header.inc - -.. include:: install_unofficial.inc - -Installing Docker on Gentoo Linux can be accomplished using one of two methods. -The first and best way if you're looking for a stable experience is to use the -official `app-emulation/docker` package directly in the portage tree. - -If you're looking for a ``-bin`` ebuild, a live ebuild, or bleeding edge -ebuild changes/fixes, the second installation method is to use the overlay -provided at https://github.com/tianon/docker-overlay which can be added using -``app-portage/layman``. The most accurate and up-to-date documentation for -properly installing and using the overlay can be found in `the overlay README -`_. - -Note that sometimes there is a disparity between the latest version and what's -in the overlay, and between the latest version in the overlay and what's in the -portage tree. Please be patient, and the latest version should propagate -shortly. - -Installation -^^^^^^^^^^^^ - -The package should properly pull in all the necessary dependencies and prompt -for all necessary kernel options. The ebuilds for 0.7+ include use flags to -pull in the proper dependencies of the major storage drivers, with the -"device-mapper" use flag being enabled by default, since that is the simplest -installation path. - -.. code-block:: bash - - sudo emerge -av app-emulation/docker - -If any issues arise from this ebuild or the resulting binary, including and -especially missing kernel configuration flags and/or dependencies, `open an -issue on the docker-overlay repository -`_ or ping tianon directly in -the #docker IRC channel on the freenode network. - -Starting Docker -^^^^^^^^^^^^^^^ - -Ensure that you are running a kernel that includes all the necessary modules -and/or configuration for LXC (and optionally for device-mapper and/or AUFS, -depending on the storage driver you've decided to use). - -OpenRC ------- - -To start the docker daemon: - -.. code-block:: bash - - sudo /etc/init.d/docker start - -To start on system boot: - -.. code-block:: bash - - sudo rc-update add docker default - -systemd -------- - -To start the docker daemon: - -.. code-block:: bash - - sudo systemctl start docker.service - -To start on system boot: - -.. code-block:: bash - - sudo systemctl enable docker.service diff --git a/docs/sources/installation/google.rst b/docs/sources/installation/google.rst deleted file mode 100644 index cc1df5da24..0000000000 --- a/docs/sources/installation/google.rst +++ /dev/null @@ -1,58 +0,0 @@ -:title: Installation on Google Cloud Platform -:description: Please note this project is currently under heavy development. It should not be used in production. -:keywords: Docker, Docker documentation, installation, google, Google Compute Engine, Google Cloud Platform - -`Google Cloud Platform `_ -==================================================== - -.. include:: install_header.inc - -.. _googlequickstart: - -`Compute Engine `_ QuickStart for `Debian `_ ------------------------------------------------------------------------------------------------------------ - -1. Go to `Google Cloud Console `_ and create a new Cloud Project with `Compute Engine enabled `_. - -2. Download and configure the `Google Cloud SDK `_ to use your project with the following commands: - -.. code-block:: bash - - $ curl https://dl.google.com/dl/cloudsdk/release/install_google_cloud_sdk.bash | bash - $ gcloud auth login - Enter a cloud project id (or leave blank to not set): - -3. 
Start a new instance, select a zone close to you and the desired instance size: - -.. code-block:: bash - - $ gcutil addinstance docker-playground --image=backports-debian-7 - 1: europe-west1-a - ... - 4: us-central1-b - >>> - 1: machineTypes/n1-standard-1 - ... - 12: machineTypes/g1-small - >>> - -4. Connect to the instance using SSH: - -.. code-block:: bash - - $ gcutil ssh docker-playground - docker-playground:~$ - -5. Install the latest Docker release and configure it to start when the instance boots: - -.. code-block:: bash - - docker-playground:~$ curl get.docker.io | bash - docker-playground:~$ sudo update-rc.d docker defaults - -6. Start a new container: - -.. code-block:: bash - - docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/' - docker on GCE \o/ diff --git a/docs/sources/installation/index.rst b/docs/sources/installation/index.rst deleted file mode 100644 index ae0e9196fa..0000000000 --- a/docs/sources/installation/index.rst +++ /dev/null @@ -1,34 +0,0 @@ -:title: Docker Installation -:description: many ways to install Docker -:keywords: docker, installation - -.. _installation_list: - -Installation -============ - -There are a number of ways to install Docker, depending on where you -want to run the daemon. The :ref:`ubuntu_linux` installation is the -officially-tested version. The community adds more techniques for -installing Docker all the time. - -Contents: - -.. toctree:: - :maxdepth: 1 - - ubuntulinux - rhel - fedora - archlinux - cruxlinux - gentoolinux - openSUSE - frugalware - mac - windows - amazon - rackspace - google - softlayer - binaries diff --git a/docs/sources/installation/mac.rst b/docs/sources/installation/mac.rst deleted file mode 100644 index d5243625a7..0000000000 --- a/docs/sources/installation/mac.rst +++ /dev/null @@ -1,212 +0,0 @@ -:title: Installation on Mac OS X 10.6 Snow Leopard -:description: Please note this project is currently under heavy development. It should not be used in production. -:keywords: Docker, Docker documentation, requirements, virtualbox, ssh, linux, os x, osx, mac - -.. _macosx: - -======== -Mac OS X -======== - -.. note:: - - These instructions are available with the new release of Docker - (version 0.8). However, they are subject to change. - -.. include:: install_header.inc - -Docker is supported on Mac OS X 10.6 "Snow Leopard" or newer. - -How To Install Docker On Mac OS X -================================= - -VirtualBox ----------- - -Docker on OS X needs VirtualBox to run. To begin with, head over to -`VirtualBox Download Page`_ and get the tool for ``OS X hosts x86/amd64``. - -.. _VirtualBox Download Page: https://www.virtualbox.org/wiki/Downloads - -Once the download is complete, open the disk image, run the set up file -(i.e. ``VirtualBox.pkg``) and install VirtualBox. Do not simply copy the -package without running the installer. - -boot2docker ------------ - -`boot2docker`_ provides a handy script to easily manage the VM running the -``docker`` daemon. It also takes care of the installation for the OS image -that is used for the job. - -.. _GitHub page: https://github.com/boot2docker/boot2docker - -With Homebrew -~~~~~~~~~~~~~ - -If you are using Homebrew on your machine, simply run the following command to install ``boot2docker``: - -.. code-block:: bash - - brew install boot2docker - -Manual installation -~~~~~~~~~~~~~~~~~~~ - -Open up a new terminal window, if you have not already. - -Run the following commands to get boot2docker: - -.. 
code-block:: bash - - # Enter the installation directory - cd ~/bin - - # Get the file - curl https://raw.github.com/boot2docker/boot2docker/master/boot2docker > boot2docker - - # Mark it executable - chmod +x boot2docker - -Docker OS X Client ------------------- - -The ``docker`` daemon is accessed using the ``docker`` client. - -With Homebrew -~~~~~~~~~~~~~ - -Run the following command to install the ``docker`` client: - -.. code-block:: bash - - brew install docker - -Manual installation -~~~~~~~~~~~~~~~~~~~ - -Run the following commands to get it downloaded and set up: - -.. code-block:: bash - - # Get the docker client file - DIR=$(mktemp -d ${TMPDIR:-/tmp}/dockerdl.XXXXXXX) && \ - curl -f -o $DIR/ld.tgz https://get.docker.io/builds/Darwin/x86_64/docker-latest.tgz && \ - gunzip $DIR/ld.tgz && \ - tar xvf $DIR/ld.tar -C $DIR/ && \ - cp $DIR/usr/local/bin/docker ./docker - - # Set the environment variable for the docker daemon - export DOCKER_HOST=tcp://127.0.0.1:4243 - - # Copy the executable file - sudo cp docker /usr/local/bin/ - -And that’s it! Let’s check out how to use it. - -How To Use Docker On Mac OS X -============================= - -The ``docker`` daemon (via boot2docker) ---------------------------------------- - -Inside the ``~/bin`` directory, run the following commands: - -.. code-block:: bash - - # Initiate the VM - ./boot2docker init - - # Run the VM (the docker daemon) - ./boot2docker up - - # To see all available commands: - ./boot2docker - - # Usage ./boot2docker {init|start|up|pause|stop|restart|status|info|delete|ssh|download} - -The ``docker`` client ---------------------- - -Once the VM with the ``docker`` daemon is up, you can use the ``docker`` -client just like any other application. - -.. code-block:: bash - - docker version - # Client version: 0.7.6 - # Go version (client): go1.2 - # Git commit (client): bc3b2ec - # Server version: 0.7.5 - # Git commit (server): c348c04 - # Go version (server): go1.2 - -Forwarding VM Port Range to Host --------------------------------- - -If we take the port range that docker uses by default with the -P option -(49000-49900), and forward same range from host to vm, we'll be able to interact -with our containers as if they were running locally: - -.. code-block:: bash - - # vm must be powered off - for i in {49000..49900}; do - VBoxManage modifyvm "boot2docker-vm" --natpf1 "tcp-port$i,tcp,,$i,,$i"; - VBoxManage modifyvm "boot2docker-vm" --natpf1 "udp-port$i,udp,,$i,,$i"; - done - -SSH-ing The VM --------------- - -If you feel the need to connect to the VM, you can simply run: - -.. code-block:: bash - - ./boot2docker ssh - - # User: docker - # Pwd: tcuser - -You can now continue with the :ref:`hello_world` example. - -Learn More -========== - -boot2docker: ------------- - -See the GitHub page for `boot2docker`_. - -.. _boot2docker: https://github.com/boot2docker/boot2docker - -If SSH complains about keys: ----------------------------- - -.. code-block:: bash - - ssh-keygen -R '[localhost]:2022' - -Upgrading to a newer release of boot2docker -------------------------------------------- - -To upgrade an initialised VM, you can use the following 3 commands. Your persistence -disk will not be changed, so you won't lose your images and containers: - -.. code-block:: bash - - ./boot2docker stop - ./boot2docker download - ./boot2docker start - -About the way Docker works on Mac OS X: ---------------------------------------- - -Docker has two key components: the ``docker`` daemon and the ``docker`` -client. 
The tool works by client commanding the daemon. In order to -work and do its magic, the daemon makes use of some Linux Kernel -features (e.g. LXC, name spaces etc.), which are not supported by OS X. -Therefore, the solution of getting Docker to run on OS X consists of -running it inside a lightweight virtual machine. In order to simplify -things, Docker comes with a bash script to make this whole process as -easy as possible (i.e. boot2docker). diff --git a/docs/sources/installation/openSUSE.rst b/docs/sources/installation/openSUSE.rst deleted file mode 100644 index c791beacbf..0000000000 --- a/docs/sources/installation/openSUSE.rst +++ /dev/null @@ -1,73 +0,0 @@ -:title: Installation on openSUSE -:description: Please note this project is currently under heavy development. It should not be used in production. -:keywords: openSUSE, virtualbox, docker, documentation, installation - -.. _openSUSE: - -openSUSE -======== - -.. include:: install_header.inc - -.. include:: install_unofficial.inc - -Docker is available in **openSUSE 12.3 and later**. Please note that due to the -current Docker limitations Docker is able to run only on the **64 bit** -architecture. - -Installation ------------- - -The ``docker`` package from the `Virtualization project`_ on `OBS`_ provides -Docker on openSUSE. - - -To proceed with Docker installation please add the right Virtualization -repository. - -.. code-block:: bash - - # openSUSE 12.3 - sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_12.3/ Virtualization - - # openSUSE 13.1 - sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_13.1/ Virtualization - - -Install the Docker package. - -.. code-block:: bash - - sudo zypper in docker - -It's also possible to install Docker using openSUSE's 1-click install. Just -visit `this`_ page, select your openSUSE version and click on the installation -link. This will add the right repository to your system and it will -also install the `docker` package. - -Now that it's installed, let's start the Docker daemon. - -.. code-block:: bash - - sudo systemctl start docker - -If we want Docker to start at boot, we should also: - -.. code-block:: bash - - sudo systemctl enable docker - -The `docker` package creates a new group named `docker`. Users, other than -`root` user, need to be part of this group in order to interact with the -Docker daemon. - -.. code-block:: bash - - sudo usermod -G docker - - -**Done!**, now continue with the :ref:`hello_world` example. - -.. _Virtualization project: https://build.opensuse.org/project/show/Virtualization -.. _OBS: https://build.opensuse.org/ -.. _this: http://software.opensuse.org/package/docker diff --git a/docs/sources/installation/rackspace.rst b/docs/sources/installation/rackspace.rst deleted file mode 100644 index 687131a413..0000000000 --- a/docs/sources/installation/rackspace.rst +++ /dev/null @@ -1,97 +0,0 @@ -:title: Installation on Rackspace Cloud -:description: Please note this project is currently under heavy development. It should not be used in production. -:keywords: Rackspace Cloud, installation, docker, linux, ubuntu - -Rackspace Cloud -=============== - -.. include:: install_unofficial.inc - -Installing Docker on Ubuntu provided by Rackspace is pretty -straightforward, and you should mostly be able to follow the -:ref:`ubuntu_linux` installation guide. - -**However, there is one caveat:** - -If you are using any Linux not already shipping with the 3.8 kernel -you will need to install it. 
And this is a little more difficult on -Rackspace. - -Rackspace boots their servers using grub's ``menu.lst`` and does not -like non 'virtual' packages (e.g. Xen compatible) kernels there, -although they do work. This results in ``update-grub`` not having the -expected result, and you will need to set the kernel manually. - -**Do not attempt this on a production machine!** - -.. code-block:: bash - - # update apt - apt-get update - - # install the new kernel - apt-get install linux-generic-lts-raring - - -Great, now you have the kernel installed in ``/boot/``, next you need to make it -boot next time. - -.. code-block:: bash - - # find the exact names - find /boot/ -name '*3.8*' - - # this should return some results - - -Now you need to manually edit ``/boot/grub/menu.lst``, you will find a -section at the bottom with the existing options. Copy the top one and -substitute the new kernel into that. Make sure the new kernel is on -top, and double check the kernel and initrd lines point to the right files. - -Take special care to double check the kernel and initrd entries. - -.. code-block:: bash - - # now edit /boot/grub/menu.lst - vi /boot/grub/menu.lst - -It will probably look something like this: - -:: - - ## ## End Default Options ## - - title Ubuntu 12.04.2 LTS, kernel 3.8.x generic - root (hd0) - kernel /boot/vmlinuz-3.8.0-19-generic root=/dev/xvda1 ro quiet splash console=hvc0 - initrd /boot/initrd.img-3.8.0-19-generic - - title Ubuntu 12.04.2 LTS, kernel 3.2.0-38-virtual - root (hd0) - kernel /boot/vmlinuz-3.2.0-38-virtual root=/dev/xvda1 ro quiet splash console=hvc0 - initrd /boot/initrd.img-3.2.0-38-virtual - - title Ubuntu 12.04.2 LTS, kernel 3.2.0-38-virtual (recovery mode) - root (hd0) - kernel /boot/vmlinuz-3.2.0-38-virtual root=/dev/xvda1 ro quiet splash single - initrd /boot/initrd.img-3.2.0-38-virtual - - -Reboot the server (either via command line or console) - -.. code-block:: bash - - # reboot - -Verify the kernel was updated - -.. code-block:: bash - - uname -a - # Linux docker-12-04 3.8.0-19-generic #30~precise1-Ubuntu SMP Wed May 1 22:26:36 UTC 2013 x86_64 x86_64 x86_64 GNU/Linux - - # nice! 3.8. - - -Now you can finish with the :ref:`ubuntu_linux` instructions. diff --git a/docs/sources/installation/rhel.rst b/docs/sources/installation/rhel.rst deleted file mode 100644 index 151fba6f1f..0000000000 --- a/docs/sources/installation/rhel.rst +++ /dev/null @@ -1,85 +0,0 @@ -:title: Installation on Red Hat Enterprise Linux -:description: Please note this project is currently under heavy development. It should not be used in production. -:keywords: Docker, Docker documentation, requirements, linux, rhel, centos - -.. _rhel: - -Red Hat Enterprise Linux -======================== - -.. include:: install_header.inc - -.. include:: install_unofficial.inc - -Docker is available for **RHEL** on EPEL. These instructions should work for -both RHEL and CentOS. They will likely work for other binary compatible EL6 -distributions as well, but they haven't been tested. - -Please note that this package is part of `Extra Packages for Enterprise -Linux (EPEL)`_, a community effort to create and maintain additional packages -for the RHEL distribution. - -Also note that due to the current Docker limitations, Docker is able to run -only on the **64 bit** architecture. - -You will need `RHEL 6.5`_ or higher, with a RHEL 6 kernel version 2.6.32-431 or higher -as this has specific kernel fixes to allow Docker to work. 
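As a quick sanity check before proceeding (a minimal sketch, not part of the EPEL instructions; the exact kernel string varies by update level), you can confirm that the running kernel meets that minimum:

.. code-block:: bash

    # Print the distribution release and the running kernel;
    # the kernel should report 2.6.32-431 or newer for Docker to work.
    cat /etc/redhat-release
    uname -r
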
- -Installation ------------- - -Firstly, you need to install the EPEL repository. Please follow the `EPEL installation instructions`_. - - -The ``docker-io`` package provides Docker on EPEL. - - -If you already have the (unrelated) ``docker`` package installed, it will -conflict with ``docker-io``. There's a `bug report`_ filed for it. -To proceed with ``docker-io`` installation, please remove -``docker`` first. - - -Next, let's install the ``docker-io`` package which will install Docker on our host. - -.. code-block:: bash - - sudo yum -y install docker-io - -To update the ``docker-io`` package - -.. code-block:: bash - - sudo yum -y update docker-io - -Now that it's installed, let's start the Docker daemon. - -.. code-block:: bash - - sudo service docker start - -If we want Docker to start at boot, we should also: - -.. code-block:: bash - - sudo chkconfig docker on - -Now let's verify that Docker is working. - -.. code-block:: bash - - sudo docker run -i -t fedora /bin/bash - -**Done!**, now continue with the :ref:`hello_world` example. - -Issues? -------- - -If you have any issues - please report them directly in the `Red Hat Bugzilla for docker-io component`_. - -.. _Extra Packages for Enterprise Linux (EPEL): https://fedoraproject.org/wiki/EPEL -.. _EPEL installation instructions: https://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F -.. _Red Hat Bugzilla for docker-io component : https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora%20EPEL&component=docker-io -.. _bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1043676 -.. _RHEL 6.5: https://access.redhat.com/site/articles/3078#RHEL6 - diff --git a/docs/sources/installation/softlayer.rst b/docs/sources/installation/softlayer.rst deleted file mode 100644 index 0fe3d6df5a..0000000000 --- a/docs/sources/installation/softlayer.rst +++ /dev/null @@ -1,25 +0,0 @@ -:title: Installation on IBM SoftLayer -:description: Please note this project is currently under heavy development. It should not be used in production. -:keywords: IBM SoftLayer, virtualization, cloud, docker, documentation, installation - -IBM SoftLayer -============= - -.. include:: install_header.inc - -IBM SoftLayer QuickStart -------------------------- - -1. Create an `IBM SoftLayer account `_. -2. Log in to the `SoftLayer Console `_. -3. Go to `Order Hourly Computing Instance Wizard `_ on your SoftLayer Console. -4. Create a new *CloudLayer Computing Instance* (CCI) using the default values for all the fields and choose: - -- *First Available* as ``Datacenter`` and -- *Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit)* as ``Operating System``. - -5. Click the *Continue Your Order* button at the bottom right and select *Go to checkout*. -6. Insert the required *User Metadata* and place the order. -7. Then continue with the :ref:`ubuntu_linux` instructions. - -Continue with the :ref:`hello_world` example. \ No newline at end of file diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst deleted file mode 100644 index 3e4b2a9855..0000000000 --- a/docs/sources/installation/ubuntulinux.rst +++ /dev/null @@ -1,380 +0,0 @@ -:title: Installation on Ubuntu -:description: Please note this project is currently under heavy development. It should not be used in production. -:keywords: Docker, Docker documentation, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux - -.. _ubuntu_linux: - -Ubuntu -====== - -.. warning:: - - These instructions have changed for 0.6. 
If you are upgrading from - an earlier version, you will need to follow them again. - -.. include:: install_header.inc - -Docker is supported on the following versions of Ubuntu: - -- :ref:`ubuntu_precise` -- :ref:`ubuntu_raring_saucy` - -Please read :ref:`ufw`, if you plan to use `UFW (Uncomplicated -Firewall) `_ - -.. _ubuntu_precise: - -Ubuntu Precise 12.04 (LTS) (64-bit) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -This installation path should work at all times. - - -Dependencies ------------- - -**Linux kernel 3.8** - -Due to a bug in LXC, Docker works best on the 3.8 kernel. Precise -comes with a 3.2 kernel, so we need to upgrade it. The kernel you'll -install when following these steps comes with AUFS built in. We also -include the generic headers to enable packages that depend on them, -like ZFS and the VirtualBox guest additions. If you didn't install the -headers for your "precise" kernel, then you can skip these headers for -the "raring" kernel. But it is safer to include them if you're not -sure. - - -.. code-block:: bash - - # install the backported kernel - sudo apt-get update - sudo apt-get install linux-image-generic-lts-raring linux-headers-generic-lts-raring - - # reboot - sudo reboot - - -Installation ------------- - -.. warning:: - - These instructions have changed for 0.6. If you are upgrading from - an earlier version, you will need to follow them again. - -Docker is available as a Debian package, which makes installation -easy. **See the** :ref:`installmirrors` **section below if you are not in -the United States.** Other sources of the Debian packages may be -faster for you to install. - -First, check that your APT system can deal with ``https`` URLs: -the file ``/usr/lib/apt/methods/https`` should exist. If it doesn't, -you need to install the package ``apt-transport-https``. - -.. code-block:: bash - - [ -e /usr/lib/apt/methods/https ] || { - apt-get update - apt-get install apt-transport-https - } - -Then, add the Docker repository key to your local keychain. - -.. code-block:: bash - - sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 - -Add the Docker repository to your apt sources list, update and install the -``lxc-docker`` package. - -*You may receive a warning that the package isn't trusted. Answer yes to -continue installation.* - -.. code-block:: bash - - sudo sh -c "echo deb https://get.docker.io/ubuntu docker main\ - > /etc/apt/sources.list.d/docker.list" - sudo apt-get update - sudo apt-get install lxc-docker - -.. note:: - - There is also a simple ``curl`` script available to help with this process. - - .. code-block:: bash - - curl -s https://get.docker.io/ubuntu/ | sudo sh - -Now verify that the installation has worked by downloading the ``ubuntu`` image -and launching a container. - -.. code-block:: bash - - sudo docker run -i -t ubuntu /bin/bash - -Type ``exit`` to exit - -**Done!**, now continue with the :ref:`hello_world` example. - -.. _ubuntu_raring_saucy: - -Ubuntu Raring 13.04 and Saucy 13.10 (64 bit) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -These instructions cover both Ubuntu Raring 13.04 and Saucy 13.10. - -Dependencies ------------- - -**Optional AUFS filesystem support** - -Ubuntu Raring already comes with the 3.8 kernel, so we don't need to install it. However, not all systems -have AUFS filesystem support enabled. AUFS support is optional as of version 0.7, but it's still available as -a driver and we recommend using it if you can. 
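As an optional sanity check (a sketch, not part of the original instructions), you can first see whether the running kernel already exposes AUFS:

.. code-block:: bash

    # /proc/filesystems lists every filesystem the running kernel supports;
    # a "nodev aufs" line means AUFS is already available.
    grep aufs /proc/filesystems || echo "AUFS not available in this kernel"
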
- -To make sure AUFS is installed, run the following commands: - -.. code-block:: bash - - sudo apt-get update - sudo apt-get install linux-image-extra-`uname -r` - - -Installation ------------- - -Docker is available as a Debian package, which makes installation easy. - -.. warning:: - - Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need - to follow them again. - -First add the Docker repository key to your local keychain. - -.. code-block:: bash - - sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 - -Add the Docker repository to your apt sources list, update and install the -``lxc-docker`` package. - -.. code-block:: bash - - sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\ - > /etc/apt/sources.list.d/docker.list" - sudo apt-get update - sudo apt-get install lxc-docker - -Now verify that the installation has worked by downloading the ``ubuntu`` image -and launching a container. - -.. code-block:: bash - - sudo docker run -i -t ubuntu /bin/bash - -Type ``exit`` to exit - -**Done!**, now continue with the :ref:`hello_world` example. - - -Giving non-root access ----------------------- - -The ``docker`` daemon always runs as the root user, and since Docker version -0.5.2, the ``docker`` daemon binds to a Unix socket instead of a TCP port. By -default that Unix socket is owned by the user *root*, and so, by default, you -can access it with ``sudo``. - -Starting in version 0.5.3, if you (or your Docker installer) create a -Unix group called *docker* and add users to it, then the ``docker`` -daemon will make the ownership of the Unix socket read/writable by the -*docker* group when the daemon starts. The ``docker`` daemon must -always run as the root user, but if you run the ``docker`` client as a user in -the *docker* group then you don't need to add ``sudo`` to all the -client commands. As of 0.9.0, you can specify that a group other than ``docker`` -should own the Unix socket with the ``-G`` option. - -.. warning:: The *docker* group (or the group specified with ``-G``) is - root-equivalent; see :ref:`dockersecurity_daemon` details. - - -**Example:** - -.. code-block:: bash - - # Add the docker group if it doesn't already exist. - sudo groupadd docker - - # Add the connected user "${USER}" to the docker group. - # Change the user name to match your preferred user. - # You may have to logout and log back in again for - # this to take effect. - sudo gpasswd -a ${USER} docker - - # Restart the Docker daemon. - sudo service docker restart - - -Upgrade --------- - -To install the latest version of docker, use the standard ``apt-get`` method: - - -.. code-block:: bash - - # update your sources list - sudo apt-get update - - # install the latest - sudo apt-get install lxc-docker - -Memory and Swap Accounting -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you want to enable memory and swap accounting, you must add the following -command-line parameters to your kernel:: - - cgroup_enable=memory swapaccount=1 - -On systems using GRUB (which is the default for Ubuntu), you can add those -parameters by editing ``/etc/default/grub`` and extending -``GRUB_CMDLINE_LINUX``. Look for the following line:: - - GRUB_CMDLINE_LINUX="" - -And replace it by the following one:: - - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -Then run ``sudo update-grub``, and reboot. 
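For reference, a scripted equivalent of that edit (a sketch assuming the stock, empty ``GRUB_CMDLINE_LINUX=""`` line shown above; adjust by hand if you already pass other parameters there):

.. code-block:: bash

    # Append the cgroup parameters to an empty GRUB_CMDLINE_LINUX (keeping a backup),
    # then regenerate the GRUB configuration and reboot.
    sudo sed -i.bak 's/^GRUB_CMDLINE_LINUX=""$/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"/' /etc/default/grub
    sudo update-grub
    sudo reboot
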
- -These parameters will help you get rid of the following warnings:: - - WARNING: Your kernel does not support cgroup swap limit. - WARNING: Your kernel does not support swap limit capabilities. Limitation discarded. - -Troubleshooting -^^^^^^^^^^^^^^^ - -On Linux Mint, the ``cgroup-lite`` package is not installed by default. -Before Docker will work correctly, you will need to install this via: - -.. code-block:: bash - - sudo apt-get update && sudo apt-get install cgroup-lite - -.. _ufw: - -Docker and UFW -^^^^^^^^^^^^^^ - -Docker uses a bridge to manage container networking. By default, UFW drops all -`forwarding` traffic. As a result you will need to enable UFW forwarding: - -.. code-block:: bash - - sudo nano /etc/default/ufw - ---- - # Change: - # DEFAULT_FORWARD_POLICY="DROP" - # to - DEFAULT_FORWARD_POLICY="ACCEPT" - -Then reload UFW: - -.. code-block:: bash - - sudo ufw reload - - -UFW's default set of rules denies all `incoming` traffic. If you want to be -able to reach your containers from another host then you should allow -incoming connections on the Docker port (default 4243): - -.. code-block:: bash - - sudo ufw allow 4243/tcp - -Docker and local DNS server warnings -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Systems which are running Ubuntu or an Ubuntu derivative on the desktop will -use `127.0.0.1` as the default nameserver in `/etc/resolv.conf`. NetworkManager -sets up dnsmasq to use the real DNS servers of the connection and sets up -`nameserver 127.0.0.1` in `/etc/resolv.conf`. - -When starting containers on these desktop machines, users will see a warning: - -.. code-block:: bash - - WARNING: Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : [8.8.8.8 8.8.4.4] - -This warning is shown because the containers can't use the local DNS nameserver -and Docker will default to using an external nameserver. - -This can be worked around by specifying a DNS server to be used by the Docker -daemon for the containers: - -.. code-block:: bash - - sudo nano /etc/default/docker - --- - # Add: - DOCKER_OPTS="--dns 8.8.8.8" - # 8.8.8.8 could be replaced with a local DNS server, such as 192.168.1.1 - # multiple DNS servers can be specified: --dns 8.8.8.8 --dns 192.168.1.1 - -The Docker daemon has to be restarted: - -.. code-block:: bash - - sudo restart docker - -.. warning:: If you're doing this on a laptop which connects to various networks, make sure to choose a public DNS server. - -An alternative solution involves disabling dnsmasq in NetworkManager by -following these steps: - -.. code-block:: bash - - sudo nano /etc/NetworkManager/NetworkManager.conf - ---- - # Change: - dns=dnsmasq - # to - #dns=dnsmasq - -NetworkManager and Docker need to be restarted afterwards: - -.. code-block:: bash - - sudo restart network-manager - sudo restart docker - -.. warning:: This might make DNS resolution slower on some networks. - -.. _installmirrors: - -Mirrors -^^^^^^^ - -You should ``ping get.docker.io`` and compare the latency to the -following mirrors, and pick whichever one is best for you. - -Yandex ------- - -`Yandex `_ in Russia is mirroring the Docker Debian -packages, updating every 6 hours. Substitute -``http://mirror.yandex.ru/mirrors/docker/`` for -``http://get.docker.io/ubuntu`` in the instructions above. For example: - -.. 
code-block:: bash - - sudo sh -c "echo deb http://mirror.yandex.ru/mirrors/docker/ docker main\ - > /etc/apt/sources.list.d/docker.list" - sudo apt-get update - sudo apt-get install lxc-docker diff --git a/docs/sources/installation/windows.rst b/docs/sources/installation/windows.rst deleted file mode 100755 index ceb29c8853..0000000000 --- a/docs/sources/installation/windows.rst +++ /dev/null @@ -1,72 +0,0 @@ -:title: Installation on Windows -:description: Please note this project is currently under heavy development. It should not be used in production. -:keywords: Docker, Docker documentation, Windows, requirements, virtualbox, boot2docker - -.. _windows: - -Microsoft Windows -================= - -Docker can run on Windows using a virtualization platform like VirtualBox. A Linux -distribution is run inside a virtual machine and that's where Docker will run. - -Installation ------------- - -.. include:: install_header.inc - -1. Install VirtualBox from https://www.virtualbox.org - or follow this `tutorial `_. - -2. Download the latest boot2docker.iso from https://github.com/boot2docker/boot2docker/releases. - -3. Start VirtualBox. - -4. Create a new Virtual machine with the following settings: - - - `Name: boot2docker` - - `Type: Linux` - - `Version: Linux 2.6 (64 bit)` - - `Memory size: 1024 MB` - - `Hard drive: Do not add a virtual hard drive` - -5. Open the settings of the virtual machine: - - 5.1. go to Storage - - 5.2. click the empty slot below `Controller: IDE` - - 5.3. click the disc icon on the right of `IDE Secondary Master` - - 5.4. click `Choose a virtual CD/DVD disk file` - -6. Browse to the path where you've saved the `boot2docker.iso`, select the `boot2docker.iso` and click open. - -7. Click OK on the Settings dialog to save the changes and close the window. - -8. Start the virtual machine by clicking the green start button. - -9. The boot2docker virtual machine should boot now. - -Running Docker --------------- - -boot2docker will log you in automatically so you can start using Docker right -away. - -Let's try the “hello world” example. Run - -.. code-block:: bash - - docker run busybox echo hello world - -This will download the small busybox image and print hello world. - - -Observations ------------- - -Persistent storage -`````````````````` - -The virtual machine created above lacks any persistent data storage. All images -and containers will be lost when shutting down or rebooting the VM. diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.0.rst b/docs/sources/reference/api/archive/docker_remote_api_v1.0.rst deleted file mode 100644 index fa4b969758..0000000000 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.0.rst +++ /dev/null @@ -1,1025 +0,0 @@ -.. use orphan to suppress "WARNING: document isn't included in any toctree" -.. per http://sphinx-doc.org/markup/misc.html#file-wide-metadata - -:orphan: - -:title: Remote API v1.0 -:description: API Documentation for Docker -:keywords: API, Docker, rcli, REST, documentation - -====================== -Docker Remote API v1.0 -====================== - -.. contents:: Table of Contents - -1. Brief introduction -===================== - -- The Remote API is replacing rcli -- Default port in the docker daemon is 4243 -- The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout stdin and stderr - -2. Endpoints -============ - -2.1 Containers --------------- - -List containers -*************** - -.. 
http:get:: /containers/json - - List containers - - **Example request**: - - .. sourcecode:: http - - GET /containers/json?all=1&before=8dfafdbc3a40 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id": "8dfafdbc3a40", - "Image": "ubuntu:latest", - "Command": "echo 1", - "Created": 1367854155, - "Status": "Exit 0" - }, - { - "Id": "9cd87474be90", - "Image": "ubuntu:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Status": "Exit 0" - }, - { - "Id": "3176a2479c92", - "Image": "centos:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Status": "Exit 0" - }, - { - "Id": "4cb07b47f9fb", - "Image": "fedora:latest", - "Command": "echo 444444444444444444444444444444444", - "Created": 1367854152, - "Status": "Exit 0" - } - ] - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :query limit: Show ``limit`` last created containers, include non-running ones. - :query since: Show only containers created since Id, include non-running ones. - :query before: Show only containers created before Id, include non-running ones. - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create a container -****************** - -.. http:post:: /containers/create - - Create a container - - **Example request**: - - .. sourcecode:: http - - POST /containers/create HTTP/1.1 - Content-Type: application/json - - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "date" - ], - "Dns":null, - "Image":"ubuntu", - "Volumes":{}, - "VolumesFrom":"" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/json - - { - "Id":"e90e34656806" - "Warnings":[] - } - - :jsonparam config: the container's configuration - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 406: impossible to attach (container not running) - :statuscode 500: server error - - -Inspect a container -******************* - -.. http:get:: /containers/(id)/json - - Return low-level information on the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/json HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.041847+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Dns": null, - "Image": "ubuntu", - "Volumes": {}, - "VolumesFrom": "" - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:01360", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {} - } - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Inspect changes on a container's filesystem -******************************************* - -.. http:get:: /containers/(id)/changes - - Inspect changes on container ``id`` 's filesystem - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/changes HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } - ] - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Export a container -****************** - -.. http:get:: /containers/(id)/export - - Export the contents of container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/export HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Start a container -***************** - -.. http:post:: /containers/(id)/start - - Start the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/start HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Stop a container -**************** - -.. http:post:: /containers/(id)/stop - - Stop the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/stop?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Restart a container -******************* - -.. http:post:: /containers/(id)/restart - - Restart the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/restart?t=5 HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Kill a container -**************** - -.. http:post:: /containers/(id)/kill - - Kill the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/kill HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Attach to a container -********************* - -.. http:post:: /containers/(id)/attach - - Attach to the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query logs: 1/True/true or 0/False/false, return logs. Default false - :query stream: 1/True/true or 0/False/false, return stream. Default false - :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -Wait a container -**************** - -.. http:post:: /containers/(id)/wait - - Block until container ``id`` stops, then returns the exit code - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/wait HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"StatusCode":0} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Remove a container -******************* - -.. http:delete:: /containers/(id) - - Remove the container ``id`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /containers/16253994b7c4?v=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - :statuscode 204: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -2.2 Images ----------- - -List Images -*********** - -.. http:get:: /images/(format) - - List images ``format`` could be json or viz (json default) - - **Example request**: - - .. sourcecode:: http - - GET /images/json?all=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Repository":"ubuntu", - "Tag":"precise", - "Id":"b750fe79269d", - "Created":1364102658 - }, - { - "Repository":"ubuntu", - "Tag":"12.04", - "Id":"b750fe79269d", - "Created":1364102658 - } - ] - - - **Example request**: - - .. sourcecode:: http - - GET /images/viz HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: text/plain - - digraph docker { - "d82cbacda43a" -> "074be284591f" - "1496068ca813" -> "08306dc45919" - "08306dc45919" -> "0e7893146ac2" - "b750fe79269d" -> "1496068ca813" - base -> "27cf78414709" [style=invis] - "f71189fff3de" -> "9a33b36209ed" - "27cf78414709" -> "b750fe79269d" - "0e7893146ac2" -> "d6434d954665" - "d6434d954665" -> "d82cbacda43a" - base -> "e9aa60c60128" [style=invis] - "074be284591f" -> "f71189fff3de" - "b750fe79269d" [label="b750fe79269d\nubuntu",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - "e9aa60c60128" [label="e9aa60c60128\ncentos",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - "9a33b36209ed" [label="9a33b36209ed\nfedora",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - base [style=invisible] - } - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create an image -*************** - -.. http:post:: /images/create - - Create an image, either by pull it from the registry or by importing it - - **Example request**: - - .. sourcecode:: http - - POST /images/create?fromImage=ubuntu HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query fromImage: name of the image to pull - :query fromSrc: source to import, - means stdin - :query repo: repository - :query tag: tag - :query registry: the registry to pull from - :statuscode 200: no error - :statuscode 500: server error - - -Insert a file in an image -************************* - -.. http:post:: /images/(name)/insert - - Insert a file from ``url`` in the image ``name`` at ``path`` - - **Example request**: - - .. sourcecode:: http - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 500: server error - - -Inspect an image -**************** - -.. http:get:: /images/(name)/json - - Return low-level information on the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/centos/json HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "parent":"27cf784147099545", - "created":"2013-03-23T22:24:18.818426-07:00", - "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "container_config": - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":false, - "AttachStderr":false, - "PortSpecs":null, - "Tty":true, - "OpenStdin":true, - "StdinOnce":false, - "Env":null, - "Cmd": ["/bin/bash"] - ,"Dns":null, - "Image":"centos", - "Volumes":null, - "VolumesFrom":"" - } - } - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Get the history of an image -*************************** - -.. http:get:: /images/(name)/history - - Return the history of the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/fedora/history HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id":"b750fe79269d", - "Created":1364102658, - "CreatedBy":"/bin/bash" - }, - { - "Id":"27cf78414709", - "Created":1364068391, - "CreatedBy":"" - } - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Push an image on the registry -***************************** - -.. http:post:: /images/(name)/push - - Push the image ``name`` on the registry - - **Example request**: - - .. sourcecode:: http - - POST /images/test/push HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query registry: the registry you wan to push, optional - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Tag an image into a repository -****************************** - -.. http:post:: /images/(name)/tag - - Tag the image ``name`` into a repository - - **Example request**: - - .. sourcecode:: http - - POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - - :query repo: The repository to tag in - :query force: 1/True/true or 0/False/false, default false - :statuscode 201: no error - :statuscode 400: bad parameter - :statuscode 404: no such image - :statuscode 500: server error - - -Remove an image -*************** - -.. http:delete:: /images/(name) - - Remove the image ``name`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /images/test HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :statuscode 204: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Search images -************* - -.. http:get:: /images/search - - Search for an image in the docker index - - **Example request**: - - .. sourcecode:: http - - GET /images/search?term=sshd HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Name":"cespare/sshd", - "Description":"" - }, - { - "Name":"johnfuller/sshd", - "Description":"" - }, - { - "Name":"dhrp/mongodb-sshd", - "Description":"" - } - ] - - :query term: term to search - :statuscode 200: no error - :statuscode 500: server error - - -2.3 Misc --------- - -Build an image from Dockerfile via stdin -**************************************** - -.. http:post:: /build - - Build an image from Dockerfile via stdin - - **Example request**: - - .. sourcecode:: http - - POST /build HTTP/1.1 - - {{ STREAM }} - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - {{ STREAM }} - - :query t: repository name to be applied to the resulting image in case of success - :statuscode 200: no error - :statuscode 500: server error - - -Get default username and email -****************************** - -.. http:get:: /auth - - Get the default username and email - - **Example request**: - - .. sourcecode:: http - - GET /auth HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "username":"hannibal", - "email":"hannibal@a-team.com" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Check auth configuration and store it -************************************* - -.. http:post:: /auth - - Get the default username and email - - **Example request**: - - .. 
sourcecode:: http - - POST /auth HTTP/1.1 - Content-Type: application/json - - { - "username":"hannibal", - "password:"xxxx", - "email":"hannibal@a-team.com" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 204: no error - :statuscode 500: server error - - -Display system-wide information -******************************* - -.. http:get:: /info - - Display system-wide information - - **Example request**: - - .. sourcecode:: http - - GET /info HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Containers":11, - "Images":16, - "Debug":false, - "NFd": 11, - "NGoroutines":21, - "MemoryLimit":true, - "SwapLimit":false - } - - :statuscode 200: no error - :statuscode 500: server error - - -Show the docker version information -*********************************** - -.. http:get:: /version - - Show the docker version information - - **Example request**: - - .. sourcecode:: http - - GET /version HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Create a new image from a container's changes -********************************************* - -.. http:post:: /commit - - Create a new image from a container's changes - - **Example request**: - - .. sourcecode:: http - - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 - Content-Type: application/json - - { - "Cmd": ["cat", "/world"], - "PortSpecs":["22"] - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/vnd.docker.raw-stream - - {"Id":"596069db4bf5"} - - :query container: source container - :query repo: repository - :query tag: tag - :query m: commit message - :query author: author (eg. "John Hannibal Smith ") - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 500: server error - - -3. Going further -================ - -3.1 Inside 'docker run' ------------------------ - -Here are the steps of 'docker run' : - -* Create the container -* If the status code is 404, it means the image doesn't exists: - * Try to pull it - * Then retry to create the container -* Start the container -* If you are not in detached mode: - * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 -* If in detached mode or only stdin is attached: - * Display the container's id - - -3.2 Hijacking -------------- - -In this first version of the API, some of the endpoints, like /attach, /pull or /push uses hijacking to transport stdin, -stdout and stderr on the same socket. This might change in the future. diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.1.rst b/docs/sources/reference/api/archive/docker_remote_api_v1.1.rst deleted file mode 100644 index 92b5039aa6..0000000000 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.1.rst +++ /dev/null @@ -1,1035 +0,0 @@ -.. use orphan to suppress "WARNING: document isn't included in any toctree" -.. per http://sphinx-doc.org/markup/misc.html#file-wide-metadata - -:orphan: - -:title: Remote API v1.1 -:description: API Documentation for Docker -:keywords: API, Docker, rcli, REST, documentation - -====================== -Docker Remote API v1.1 -====================== - -.. contents:: Table of Contents - -1. 
Brief introduction -===================== - -- The Remote API is replacing rcli -- Default port in the docker daemon is 4243 -- The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout stdin and stderr - -2. Endpoints -============ - -2.1 Containers --------------- - -List containers -*************** - -.. http:get:: /containers/json - - List containers - - **Example request**: - - .. sourcecode:: http - - GET /containers/json?all=1&before=8dfafdbc3a40 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id": "8dfafdbc3a40", - "Image": "ubuntu:latest", - "Command": "echo 1", - "Created": 1367854155, - "Status": "Exit 0" - }, - { - "Id": "9cd87474be90", - "Image": "ubuntu:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Status": "Exit 0" - }, - { - "Id": "3176a2479c92", - "Image": "centos:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Status": "Exit 0" - }, - { - "Id": "4cb07b47f9fb", - "Image": "fedora:latest", - "Command": "echo 444444444444444444444444444444444", - "Created": 1367854152, - "Status": "Exit 0" - } - ] - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :query limit: Show ``limit`` last created containers, include non-running ones. - :query since: Show only containers created since Id, include non-running ones. - :query before: Show only containers created before Id, include non-running ones. - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create a container -****************** - -.. http:post:: /containers/create - - Create a container - - **Example request**: - - .. sourcecode:: http - - POST /containers/create HTTP/1.1 - Content-Type: application/json - - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "date" - ], - "Dns":null, - "Image":"ubuntu", - "Volumes":{}, - "VolumesFrom":"" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/json - - { - "Id":"e90e34656806" - "Warnings":[] - } - - :jsonparam config: the container's configuration - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 406: impossible to attach (container not running) - :statuscode 500: server error - - -Inspect a container -******************* - -.. http:get:: /containers/(id)/json - - Return low-level information on the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/json HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.041847+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Dns": null, - "Image": "ubuntu", - "Volumes": {}, - "VolumesFrom": "" - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:01360", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {} - } - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Inspect changes on a container's filesystem -******************************************* - -.. http:get:: /containers/(id)/changes - - Inspect changes on container ``id`` 's filesystem - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/changes HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } - ] - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Export a container -****************** - -.. http:get:: /containers/(id)/export - - Export the contents of container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/export HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Start a container -***************** - -.. http:post:: /containers/(id)/start - - Start the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/start HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Stop a container -**************** - -.. http:post:: /containers/(id)/stop - - Stop the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/stop?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Restart a container -******************* - -.. http:post:: /containers/(id)/restart - - Restart the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/restart?t=5 HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Kill a container -**************** - -.. http:post:: /containers/(id)/kill - - Kill the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/kill HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Attach to a container -********************* - -.. http:post:: /containers/(id)/attach - - Attach to the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query logs: 1/True/true or 0/False/false, return logs. Default false - :query stream: 1/True/true or 0/False/false, return stream. Default false - :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -Wait a container -**************** - -.. http:post:: /containers/(id)/wait - - Block until container ``id`` stops, then returns the exit code - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/wait HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"StatusCode":0} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Remove a container -******************* - -.. http:delete:: /containers/(id) - - Remove the container ``id`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /containers/16253994b7c4?v=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - :statuscode 204: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -2.2 Images ----------- - -List Images -*********** - -.. http:get:: /images/(format) - - List images ``format`` could be json or viz (json default) - - **Example request**: - - .. sourcecode:: http - - GET /images/json?all=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Repository":"ubuntu", - "Tag":"precise", - "Id":"b750fe79269d", - "Created":1364102658 - }, - { - "Repository":"ubuntu", - "Tag":"12.04", - "Id":"b750fe79269d", - "Created":1364102658 - } - ] - - - **Example request**: - - .. sourcecode:: http - - GET /images/viz HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: text/plain - - digraph docker { - "d82cbacda43a" -> "074be284591f" - "1496068ca813" -> "08306dc45919" - "08306dc45919" -> "0e7893146ac2" - "b750fe79269d" -> "1496068ca813" - base -> "27cf78414709" [style=invis] - "f71189fff3de" -> "9a33b36209ed" - "27cf78414709" -> "b750fe79269d" - "0e7893146ac2" -> "d6434d954665" - "d6434d954665" -> "d82cbacda43a" - base -> "e9aa60c60128" [style=invis] - "074be284591f" -> "f71189fff3de" - "b750fe79269d" [label="b750fe79269d\nubuntu",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - "e9aa60c60128" [label="e9aa60c60128\ncentos",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - "9a33b36209ed" [label="9a33b36209ed\nfedora",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - base [style=invisible] - } - - :query all: 1/True/true or 0/False/false, Show all images. Only top-level images are shown by default - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create an image -*************** - -.. http:post:: /images/create - - Create an image, either by pulling it from the registry or by importing it - - **Example request**: - - .. sourcecode:: http - - POST /images/create?fromImage=ubuntu HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :query fromImage: name of the image to pull - :query fromSrc: source to import, - means stdin - :query repo: repository - :query tag: tag - :query registry: the registry to pull from - :statuscode 200: no error - :statuscode 500: server error - - -Insert a file in an image -************************* - -.. http:post:: /images/(name)/insert - - Insert a file from ``url`` in the image ``name`` at ``path`` - - **Example request**: - - .. sourcecode:: http - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :statuscode 200: no error - :statuscode 500: server error - - -Inspect an image -**************** - -.. http:get:: /images/(name)/json - - Return low-level information on the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/centos/json HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "parent":"27cf784147099545", - "created":"2013-03-23T22:24:18.818426-07:00", - "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "container_config": - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":false, - "AttachStderr":false, - "PortSpecs":null, - "Tty":true, - "OpenStdin":true, - "StdinOnce":false, - "Env":null, - "Cmd": ["/bin/bash"] - ,"Dns":null, - "Image":"centos", - "Volumes":null, - "VolumesFrom":"" - } - } - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Get the history of an image -*************************** - -.. http:get:: /images/(name)/history - - Return the history of the image ``name`` - - **Example request**: - - .. 
sourcecode:: http - - GET /images/fedora/history HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id":"b750fe79269d", - "Created":1364102658, - "CreatedBy":"/bin/bash" - }, - { - "Id":"27cf78414709", - "Created":1364068391, - "CreatedBy":"" - } - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Push an image on the registry -***************************** - -.. http:post:: /images/(name)/push - - Push the image ``name`` on the registry - - **Example request**: - - .. sourcecode:: http - - POST /images/test/push HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :query registry: the registry you wan to push, optional - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Tag an image into a repository -****************************** - -.. http:post:: /images/(name)/tag - - Tag the image ``name`` into a repository - - **Example request**: - - .. sourcecode:: http - - POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - - :query repo: The repository to tag in - :query force: 1/True/true or 0/False/false, default false - :statuscode 201: no error - :statuscode 400: bad parameter - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Remove an image -*************** - -.. http:delete:: /images/(name) - - Remove the image ``name`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /images/test HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :statuscode 204: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Search images -************* - -.. http:get:: /images/search - - Search for an image in the docker index - - **Example request**: - - .. sourcecode:: http - - GET /images/search?term=sshd HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Name":"cespare/sshd", - "Description":"" - }, - { - "Name":"johnfuller/sshd", - "Description":"" - }, - { - "Name":"dhrp/mongodb-sshd", - "Description":"" - } - ] - - :query term: term to search - :statuscode 200: no error - :statuscode 500: server error - - -2.3 Misc --------- - -Build an image from Dockerfile via stdin -**************************************** - -.. http:post:: /build - - Build an image from Dockerfile via stdin - - **Example request**: - - .. sourcecode:: http - - POST /build HTTP/1.1 - - {{ STREAM }} - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - {{ STREAM }} - - :query t: tag to be applied to the resulting image in case of success - :statuscode 200: no error - :statuscode 500: server error - - -Get default username and email -****************************** - -.. http:get:: /auth - - Get the default username and email - - **Example request**: - - .. sourcecode:: http - - GET /auth HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "username":"hannibal", - "email":"hannibal@a-team.com" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Check auth configuration and store it -************************************* - -.. http:post:: /auth - - Get the default username and email - - **Example request**: - - .. sourcecode:: http - - POST /auth HTTP/1.1 - Content-Type: application/json - - { - "username":"hannibal", - "password:"xxxx", - "email":"hannibal@a-team.com" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 204: no error - :statuscode 500: server error - - -Display system-wide information -******************************* - -.. http:get:: /info - - Display system-wide information - - **Example request**: - - .. sourcecode:: http - - GET /info HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Containers":11, - "Images":16, - "Debug":false, - "NFd": 11, - "NGoroutines":21, - "MemoryLimit":true, - "SwapLimit":false - } - - :statuscode 200: no error - :statuscode 500: server error - - -Show the docker version information -*********************************** - -.. http:get:: /version - - Show the docker version information - - **Example request**: - - .. sourcecode:: http - - GET /version HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Create a new image from a container's changes -********************************************* - -.. http:post:: /commit - - Create a new image from a container's changes - - **Example request**: - - .. sourcecode:: http - - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 - Content-Type: application/json - - { - "Cmd": ["cat", "/world"], - "PortSpecs":["22"] - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/vnd.docker.raw-stream - - {"Id":"596069db4bf5"} - - :query container: source container - :query repo: repository - :query tag: tag - :query m: commit message - :query author: author (eg. "John Hannibal Smith ") - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 500: server error - - -3. Going further -================ - -3.1 Inside 'docker run' ------------------------ - -Here are the steps of 'docker run' : - -* Create the container -* If the status code is 404, it means the image doesn't exists: - * Try to pull it - * Then retry to create the container -* Start the container -* If you are not in detached mode: - * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 -* If in detached mode or only stdin is attached: - * Display the container's id - - -3.2 Hijacking -------------- - -In this version of the API, /attach uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.2.rst b/docs/sources/reference/api/archive/docker_remote_api_v1.2.rst deleted file mode 100644 index 80f76a3de9..0000000000 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.2.rst +++ /dev/null @@ -1,1051 +0,0 @@ -.. 
use orphan to suppress "WARNING: document isn't included in any toctree" -.. per http://sphinx-doc.org/markup/misc.html#file-wide-metadata - -:orphan: - -:title: Remote API v1.2 -:description: API Documentation for Docker -:keywords: API, Docker, rcli, REST, documentation - -====================== -Docker Remote API v1.2 -====================== - -.. contents:: Table of Contents - -1. Brief introduction -===================== - -- The Remote API is replacing rcli -- Default port in the docker daemon is 4243 -- The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout stdin and stderr - -2. Endpoints -============ - -2.1 Containers --------------- - -List containers -*************** - -.. http:get:: /containers/json - - List containers - - **Example request**: - - .. sourcecode:: http - - GET /containers/json?all=1&before=8dfafdbc3a40 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id": "8dfafdbc3a40", - "Image": "ubuntu:latest", - "Command": "echo 1", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":"", - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "9cd87474be90", - "Image": "ubuntu:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":"", - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "3176a2479c92", - "Image": "centos:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Status": "Exit 0", - "Ports":"", - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "4cb07b47f9fb", - "Image": "fedora:latest", - "Command": "echo 444444444444444444444444444444444", - "Created": 1367854152, - "Status": "Exit 0", - "Ports":"", - "SizeRw":12288, - "SizeRootFs":0 - } - ] - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :query limit: Show ``limit`` last created containers, include non-running ones. - :query since: Show only containers created since Id, include non-running ones. - :query before: Show only containers created before Id, include non-running ones. - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create a container -****************** - -.. http:post:: /containers/create - - Create a container - - **Example request**: - - .. sourcecode:: http - - POST /containers/create HTTP/1.1 - Content-Type: application/json - - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "date" - ], - "Dns":null, - "Image":"ubuntu", - "Volumes":{}, - "VolumesFrom":"" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/json - - { - "Id":"e90e34656806" - "Warnings":[] - } - - :jsonparam config: the container's configuration - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 406: impossible to attach (container not running) - :statuscode 500: server error - - -Inspect a container -******************* - -.. http:get:: /containers/(id)/json - - Return low-level information on the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/json HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.041847+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Dns": null, - "Image": "ubuntu", - "Volumes": {}, - "VolumesFrom": "" - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:01360", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {} - } - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Inspect changes on a container's filesystem -******************************************* - -.. http:get:: /containers/(id)/changes - - Inspect changes on container ``id`` 's filesystem - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/changes HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } - ] - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Export a container -****************** - -.. http:get:: /containers/(id)/export - - Export the contents of container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/export HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Start a container -***************** - -.. http:post:: /containers/(id)/start - - Start the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/start HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Stop a container -**************** - -.. http:post:: /containers/(id)/stop - - Stop the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/stop?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Restart a container -******************* - -.. http:post:: /containers/(id)/restart - - Restart the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/restart?t=5 HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Kill a container -**************** - -.. http:post:: /containers/(id)/kill - - Kill the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/kill HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Attach to a container -********************* - -.. http:post:: /containers/(id)/attach - - Attach to the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query logs: 1/True/true or 0/False/false, return logs. Default false - :query stream: 1/True/true or 0/False/false, return stream. Default false - :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -Wait a container -**************** - -.. http:post:: /containers/(id)/wait - - Block until container ``id`` stops, then returns the exit code - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/wait HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"StatusCode":0} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Remove a container -******************* - -.. http:delete:: /containers/(id) - - Remove the container ``id`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /containers/16253994b7c4?v=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - :statuscode 204: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -2.2 Images ----------- - -List Images -*********** - -.. http:get:: /images/(format) - - List images ``format`` could be json or viz (json default) - - **Example request**: - - .. sourcecode:: http - - GET /images/json?all=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Repository":"ubuntu", - "Tag":"precise", - "Id":"b750fe79269d", - "Created":1364102658, - "Size":24653, - "VirtualSize":180116135 - }, - { - "Repository":"ubuntu", - "Tag":"12.04", - "Id":"b750fe79269d", - "Created":1364102658, - "Size":24653, - "VirtualSize":180116135 - } - ] - - - **Example request**: - - .. sourcecode:: http - - GET /images/viz HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: text/plain - - digraph docker { - "d82cbacda43a" -> "074be284591f" - "1496068ca813" -> "08306dc45919" - "08306dc45919" -> "0e7893146ac2" - "b750fe79269d" -> "1496068ca813" - base -> "27cf78414709" [style=invis] - "f71189fff3de" -> "9a33b36209ed" - "27cf78414709" -> "b750fe79269d" - "0e7893146ac2" -> "d6434d954665" - "d6434d954665" -> "d82cbacda43a" - base -> "e9aa60c60128" [style=invis] - "074be284591f" -> "f71189fff3de" - "b750fe79269d" [label="b750fe79269d\nubuntu",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - "e9aa60c60128" [label="e9aa60c60128\ncentos",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - "9a33b36209ed" [label="9a33b36209ed\nfedora",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - base [style=invisible] - } - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create an image -*************** - -.. http:post:: /images/create - - Create an image, either by pull it from the registry or by importing it - - **Example request**: - - .. sourcecode:: http - - POST /images/create?fromImage=ubuntu HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :query fromImage: name of the image to pull - :query fromSrc: source to import, - means stdin - :query repo: repository - :query tag: tag - :query registry: the registry to pull from - :statuscode 200: no error - :statuscode 500: server error - - -Insert a file in an image -************************* - -.. http:post:: /images/(name)/insert - - Insert a file from ``url`` in the image ``name`` at ``path`` - - **Example request**: - - .. sourcecode:: http - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :statuscode 200: no error - :statuscode 500: server error - - -Inspect an image -**************** - -.. http:get:: /images/(name)/json - - Return low-level information on the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/centos/json HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "parent":"27cf784147099545", - "created":"2013-03-23T22:24:18.818426-07:00", - "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "container_config": - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":false, - "AttachStderr":false, - "PortSpecs":null, - "Tty":true, - "OpenStdin":true, - "StdinOnce":false, - "Env":null, - "Cmd": ["/bin/bash"] - ,"Dns":null, - "Image":"centos", - "Volumes":null, - "VolumesFrom":"" - }, - "Size": 6824592 - } - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Get the history of an image -*************************** - -.. http:get:: /images/(name)/history - - Return the history of the image ``name`` - - **Example request**: - - .. 
sourcecode:: http - - GET /images/fedora/history HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id":"b750fe79269d", - "Tag":["ubuntu:latest"], - "Created":1364102658, - "CreatedBy":"/bin/bash" - }, - { - "Id":"27cf78414709", - "Created":1364068391, - "CreatedBy":"" - } - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Push an image on the registry -***************************** - -.. http:post:: /images/(name)/push - - Push the image ``name`` on the registry - - **Example request**: - - .. sourcecode:: http - - POST /images/test/push HTTP/1.1 - {{ authConfig }} - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :query registry: the registry you wan to push, optional - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Tag an image into a repository -****************************** - -.. http:post:: /images/(name)/tag - - Tag the image ``name`` into a repository - - **Example request**: - - .. sourcecode:: http - - POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - - :query repo: The repository to tag in - :query force: 1/True/true or 0/False/false, default false - :statuscode 201: no error - :statuscode 400: bad parameter - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Remove an image -*************** - -.. http:delete:: /images/(name) - - Remove the image ``name`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /images/test HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-type: application/json - - [ - {"Untagged":"3e2f21a89f"}, - {"Deleted":"3e2f21a89f"}, - {"Deleted":"53b4f83ac9"} - ] - - :statuscode 204: no error - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Search images -************* - -.. http:get:: /images/search - - Search for an image in the docker index - - **Example request**: - - .. sourcecode:: http - - GET /images/search?term=sshd HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Name":"cespare/sshd", - "Description":"" - }, - { - "Name":"johnfuller/sshd", - "Description":"" - }, - { - "Name":"dhrp/mongodb-sshd", - "Description":"" - } - ] - - :query term: term to search - :statuscode 200: no error - :statuscode 500: server error - - -2.3 Misc --------- - -Build an image from Dockerfile via stdin -**************************************** - -.. http:post:: /build - - Build an image from Dockerfile - - **Example request**: - - .. sourcecode:: http - - POST /build HTTP/1.1 - - {{ STREAM }} - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - {{ STREAM }} - - :query t: repository name to be applied to the resulting image in case of success - :query remote: resource to fetch, as URI - :statuscode 200: no error - :statuscode 500: server error - -{{ STREAM }} is the raw text output of the build command. It uses the HTTP Hijack method in order to stream. - - -Check auth configuration -************************ - -.. http:post:: /auth - - Get the default username and email - - **Example request**: - - .. 
sourcecode:: http - - POST /auth HTTP/1.1 - Content-Type: application/json - - { - "username":"hannibal", - "password:"xxxx", - "email":"hannibal@a-team.com" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Status": "Login Succeeded" - } - - :statuscode 200: no error - :statuscode 204: no error - :statuscode 401: unauthorized - :statuscode 403: forbidden - :statuscode 500: server error - - -Display system-wide information -******************************* - -.. http:get:: /info - - Display system-wide information - - **Example request**: - - .. sourcecode:: http - - GET /info HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Containers":11, - "Images":16, - "Debug":false, - "NFd": 11, - "NGoroutines":21, - "MemoryLimit":true, - "SwapLimit":false - } - - :statuscode 200: no error - :statuscode 500: server error - - -Show the docker version information -*********************************** - -.. http:get:: /version - - Show the docker version information - - **Example request**: - - .. sourcecode:: http - - GET /version HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Create a new image from a container's changes -********************************************* - -.. http:post:: /commit - - Create a new image from a container's changes - - **Example request**: - - .. sourcecode:: http - - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 - Content-Type: application/json - - { - "Cmd": ["cat", "/world"], - "PortSpecs":["22"] - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/vnd.docker.raw-stream - - {"Id":"596069db4bf5"} - - :query container: source container - :query repo: repository - :query tag: tag - :query m: commit message - :query author: author (eg. "John Hannibal Smith ") - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 500: server error - - -3. Going further -================ - -3.1 Inside 'docker run' ------------------------ - -Here are the steps of 'docker run' : - -* Create the container -* If the status code is 404, it means the image doesn't exists: - * Try to pull it - * Then retry to create the container -* Start the container -* If you are not in detached mode: - * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 -* If in detached mode or only stdin is attached: - * Display the container's id - - -3.2 Hijacking -------------- - -In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. - -3.3 CORS Requests ------------------ - -To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. - - docker -d -H="tcp://192.168.1.9:4243" --api-enable-cors - diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.3.rst b/docs/sources/reference/api/archive/docker_remote_api_v1.3.rst deleted file mode 100644 index 2b17a37a4d..0000000000 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.3.rst +++ /dev/null @@ -1,1130 +0,0 @@ -.. use orphan to suppress "WARNING: document isn't included in any toctree" -.. 
per http://sphinx-doc.org/markup/misc.html#file-wide-metadata - -:orphan: - -:title: Remote API v1.3 -:description: API Documentation for Docker -:keywords: API, Docker, rcli, REST, documentation - -====================== -Docker Remote API v1.3 -====================== - -.. contents:: Table of Contents - -1. Brief introduction -===================== - -- The Remote API is replacing rcli -- Default port in the docker daemon is 4243 -- The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout stdin and stderr - -2. Endpoints -============ - -2.1 Containers --------------- - -List containers -*************** - -.. http:get:: /containers/json - - List containers - - **Example request**: - - .. sourcecode:: http - - GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id": "8dfafdbc3a40", - "Image": "ubuntu:latest", - "Command": "echo 1", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":"", - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "9cd87474be90", - "Image": "ubuntu:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":"", - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "3176a2479c92", - "Image": "centos:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Status": "Exit 0", - "Ports":"", - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "4cb07b47f9fb", - "Image": "fedora:latest", - "Command": "echo 444444444444444444444444444444444", - "Created": 1367854152, - "Status": "Exit 0", - "Ports":"", - "SizeRw":12288, - "SizeRootFs":0 - } - ] - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :query limit: Show ``limit`` last created containers, include non-running ones. - :query since: Show only containers created since Id, include non-running ones. - :query before: Show only containers created before Id, include non-running ones. - :query size: 1/True/true or 0/False/false, Show the containers sizes - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create a container -****************** - -.. http:post:: /containers/create - - Create a container - - **Example request**: - - .. sourcecode:: http - - POST /containers/create HTTP/1.1 - Content-Type: application/json - - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "date" - ], - "Dns":null, - "Image":"ubuntu", - "Volumes":{}, - "VolumesFrom":"" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/json - - { - "Id":"e90e34656806" - "Warnings":[] - } - - :jsonparam config: the container's configuration - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 406: impossible to attach (container not running) - :statuscode 500: server error - - -Inspect a container -******************* - -.. http:get:: /containers/(id)/json - - Return low-level information on the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/json HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.041847+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Dns": null, - "Image": "ubuntu", - "Volumes": {}, - "VolumesFrom": "" - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:01360", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {} - } - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -List processes running inside a container -***************************************** - -.. http:get:: /containers/(id)/top - - List processes running inside the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/top HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "PID":"11935", - "Tty":"pts/2", - "Time":"00:00:00", - "Cmd":"sh" - }, - { - "PID":"12140", - "Tty":"pts/2", - "Time":"00:00:00", - "Cmd":"sleep" - } - ] - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Inspect changes on a container's filesystem -******************************************* - -.. http:get:: /containers/(id)/changes - - Inspect changes on container ``id`` 's filesystem - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/changes HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } - ] - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Export a container -****************** - -.. http:get:: /containers/(id)/export - - Export the contents of container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/export HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Start a container -***************** - -.. http:post:: /containers/(id)/start - - Start the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/(id)/start HTTP/1.1 - Content-Type: application/json - - { - "Binds":["/tmp:/tmp"] - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 No Content - Content-Type: text/plain - - :jsonparam hostConfig: the container's host configuration (optional) - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Stop a container -**************** - -.. 
http:post:: /containers/(id)/stop - - Stop the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/stop?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Restart a container -******************* - -.. http:post:: /containers/(id)/restart - - Restart the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/restart?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Kill a container -**************** - -.. http:post:: /containers/(id)/kill - - Kill the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/kill HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Attach to a container -********************* - -.. http:post:: /containers/(id)/attach - - Attach to the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query logs: 1/True/true or 0/False/false, return logs. Default false - :query stream: 1/True/true or 0/False/false, return stream. Default false - :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -Wait a container -**************** - -.. http:post:: /containers/(id)/wait - - Block until container ``id`` stops, then returns the exit code - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/wait HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"StatusCode":0} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Remove a container -******************* - -.. http:delete:: /containers/(id) - - Remove the container ``id`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /containers/16253994b7c4?v=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - :statuscode 204: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -2.2 Images ----------- - -List Images -*********** - -.. http:get:: /images/(format) - - List images ``format`` could be json or viz (json default) - - **Example request**: - - .. sourcecode:: http - - GET /images/json?all=0 HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Repository":"ubuntu", - "Tag":"precise", - "Id":"b750fe79269d", - "Created":1364102658, - "Size":24653, - "VirtualSize":180116135 - }, - { - "Repository":"ubuntu", - "Tag":"12.04", - "Id":"b750fe79269d", - "Created":1364102658, - "Size":24653, - "VirtualSize":180116135 - } - ] - - - **Example request**: - - .. sourcecode:: http - - GET /images/viz HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: text/plain - - digraph docker { - "d82cbacda43a" -> "074be284591f" - "1496068ca813" -> "08306dc45919" - "08306dc45919" -> "0e7893146ac2" - "b750fe79269d" -> "1496068ca813" - base -> "27cf78414709" [style=invis] - "f71189fff3de" -> "9a33b36209ed" - "27cf78414709" -> "b750fe79269d" - "0e7893146ac2" -> "d6434d954665" - "d6434d954665" -> "d82cbacda43a" - base -> "e9aa60c60128" [style=invis] - "074be284591f" -> "f71189fff3de" - "b750fe79269d" [label="b750fe79269d\nubuntu",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - "e9aa60c60128" [label="e9aa60c60128\ncentos",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - "9a33b36209ed" [label="9a33b36209ed\nfedora",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - base [style=invisible] - } - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create an image -*************** - -.. http:post:: /images/create - - Create an image, either by pull it from the registry or by importing it - - **Example request**: - - .. sourcecode:: http - - POST /images/create?fromImage=ubuntu HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :query fromImage: name of the image to pull - :query fromSrc: source to import, - means stdin - :query repo: repository - :query tag: tag - :query registry: the registry to pull from - :statuscode 200: no error - :statuscode 500: server error - - -Insert a file in an image -************************* - -.. http:post:: /images/(name)/insert - - Insert a file from ``url`` in the image ``name`` at ``path`` - - **Example request**: - - .. sourcecode:: http - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :statuscode 200: no error - :statuscode 500: server error - - -Inspect an image -**************** - -.. http:get:: /images/(name)/json - - Return low-level information on the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/centos/json HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "parent":"27cf784147099545", - "created":"2013-03-23T22:24:18.818426-07:00", - "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "container_config": - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":false, - "AttachStderr":false, - "PortSpecs":null, - "Tty":true, - "OpenStdin":true, - "StdinOnce":false, - "Env":null, - "Cmd": ["/bin/bash"] - ,"Dns":null, - "Image":"centos", - "Volumes":null, - "VolumesFrom":"" - }, - "Size": 6824592 - } - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Get the history of an image -*************************** - -.. http:get:: /images/(name)/history - - Return the history of the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/fedora/history HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id":"b750fe79269d", - "Created":1364102658, - "CreatedBy":"/bin/bash" - }, - { - "Id":"27cf78414709", - "Created":1364068391, - "CreatedBy":"" - } - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Push an image on the registry -***************************** - -.. http:post:: /images/(name)/push - - Push the image ``name`` on the registry - - **Example request**: - - .. sourcecode:: http - - POST /images/test/push HTTP/1.1 - {{ authConfig }} - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :query registry: the registry you wan to push, optional - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Tag an image into a repository -****************************** - -.. http:post:: /images/(name)/tag - - Tag the image ``name`` into a repository - - **Example request**: - - .. sourcecode:: http - - POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - - :query repo: The repository to tag in - :query force: 1/True/true or 0/False/false, default false - :statuscode 201: no error - :statuscode 400: bad parameter - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Remove an image -*************** - -.. http:delete:: /images/(name) - - Remove the image ``name`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /images/test HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-type: application/json - - [ - {"Untagged":"3e2f21a89f"}, - {"Deleted":"3e2f21a89f"}, - {"Deleted":"53b4f83ac9"} - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Search images -************* - -.. http:get:: /images/search - - Search for an image in the docker index - - **Example request**: - - .. sourcecode:: http - - GET /images/search?term=sshd HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Name":"cespare/sshd", - "Description":"" - }, - { - "Name":"johnfuller/sshd", - "Description":"" - }, - { - "Name":"dhrp/mongodb-sshd", - "Description":"" - } - ] - - :query term: term to search - :statuscode 200: no error - :statuscode 500: server error - - -2.3 Misc --------- - -Build an image from Dockerfile via stdin -**************************************** - -.. http:post:: /build - - Build an image from Dockerfile via stdin - - **Example request**: - - .. sourcecode:: http - - POST /build HTTP/1.1 - - {{ STREAM }} - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - {{ STREAM }} - - - The stream must be a tar archive compressed with one of the following algorithms: - identity (no compression), gzip, bzip2, xz. The archive must include a file called - `Dockerfile` at its root. It may include any number of other files, which will be - accessible in the build context (See the ADD build command). - - The Content-type header should be set to "application/tar". - - :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success - :query q: suppress verbose build output - :statuscode 200: no error - :statuscode 500: server error - - -Check auth configuration -************************ - -.. http:post:: /auth - - Get the default username and email - - **Example request**: - - .. sourcecode:: http - - POST /auth HTTP/1.1 - Content-Type: application/json - - { - "username":"hannibal", - "password:"xxxx", - "email":"hannibal@a-team.com" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 204: no error - :statuscode 500: server error - - -Display system-wide information -******************************* - -.. http:get:: /info - - Display system-wide information - - **Example request**: - - .. sourcecode:: http - - GET /info HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Containers":11, - "Images":16, - "Debug":false, - "NFd": 11, - "NGoroutines":21, - "MemoryLimit":true, - "SwapLimit":false, - "EventsListeners":"0", - "LXCVersion":"0.7.5", - "KernelVersion":"3.8.0-19-generic" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Show the docker version information -*********************************** - -.. http:get:: /version - - Show the docker version information - - **Example request**: - - .. sourcecode:: http - - GET /version HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Create a new image from a container's changes -********************************************* - -.. http:post:: /commit - - Create a new image from a container's changes - - **Example request**: - - .. sourcecode:: http - - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 - Content-Type: application/json - - { - "Cmd": ["cat", "/world"], - "PortSpecs":["22"] - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/vnd.docker.raw-stream - - {"Id":"596069db4bf5"} - - :query container: source container - :query repo: repository - :query tag: tag - :query m: commit message - :query author: author (eg. 
"John Hannibal Smith ") - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Monitor Docker's events -*********************** - -.. http:get:: /events - - Get events from docker, either in real time via streaming, or via polling (using `since`) - - **Example request**: - - .. sourcecode:: http - - GET /events?since=1374067924 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"create","id":"dfdf82bd3881","time":1374067924} - {"status":"start","id":"dfdf82bd3881","time":1374067924} - {"status":"stop","id":"dfdf82bd3881","time":1374067966} - {"status":"destroy","id":"dfdf82bd3881","time":1374067970} - - :query since: timestamp used for polling - :statuscode 200: no error - :statuscode 500: server error - - -3. Going further -================ - -3.1 Inside 'docker run' ------------------------ - -Here are the steps of 'docker run' : - -* Create the container -* If the status code is 404, it means the image doesn't exists: - * Try to pull it - * Then retry to create the container -* Start the container -* If you are not in detached mode: - * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 -* If in detached mode or only stdin is attached: - * Display the container's id - - -3.2 Hijacking -------------- - -In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. - -3.3 CORS Requests ------------------ - -To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. - - docker -d -H="192.168.1.9:4243" --api-enable-cors - diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.4.rst b/docs/sources/reference/api/archive/docker_remote_api_v1.4.rst deleted file mode 100644 index ff5aaa7a74..0000000000 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.4.rst +++ /dev/null @@ -1,1176 +0,0 @@ -:title: Remote API v1.4 -:description: API Documentation for Docker -:keywords: API, Docker, rcli, REST, documentation - -:orphan: - -====================== -Docker Remote API v1.4 -====================== - -.. contents:: Table of Contents - -1. Brief introduction -===================== - -- The Remote API is replacing rcli -- Default port in the docker daemon is 4243 -- The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout stdin and stderr - -2. Endpoints -============ - -2.1 Containers --------------- - -List containers -*************** - -.. http:get:: /containers/json - - List containers - - **Example request**: - - .. sourcecode:: http - - GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id": "8dfafdbc3a40", - "Image": "ubuntu:latest", - "Command": "echo 1", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":"", - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "9cd87474be90", - "Image": "ubuntu:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":"", - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "3176a2479c92", - "Image": "centos:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Status": "Exit 0", - "Ports":"", - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "4cb07b47f9fb", - "Image": "fedora:latest", - "Command": "echo 444444444444444444444444444444444", - "Created": 1367854152, - "Status": "Exit 0", - "Ports":"", - "SizeRw":12288, - "SizeRootFs":0 - } - ] - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :query limit: Show ``limit`` last created containers, include non-running ones. - :query since: Show only containers created since Id, include non-running ones. - :query before: Show only containers created before Id, include non-running ones. - :query size: 1/True/true or 0/False/false, Show the containers sizes - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create a container -****************** - -.. http:post:: /containers/create - - Create a container - - **Example request**: - - .. sourcecode:: http - - POST /containers/create HTTP/1.1 - Content-Type: application/json - - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "Privileged": false, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "date" - ], - "Dns":null, - "Image":"ubuntu", - "Volumes":{}, - "VolumesFrom":"", - "WorkingDir":"" - - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/json - - { - "Id":"e90e34656806" - "Warnings":[] - } - - :jsonparam config: the container's configuration - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 406: impossible to attach (container not running) - :statuscode 500: server error - - -Inspect a container -******************* - -.. http:get:: /containers/(id)/json - - Return low-level information on the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/json HTTP/1.1 - - **Example response**: - - .. 
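A minimal sketch of calling the create endpoint from Go, setting only ``Image`` and ``Cmd`` and decoding the returned ``Id``; every other field is left to its default. The daemon address ``localhost:4243`` is an assumption.

.. code-block:: go

   package main

   import (
   	"bytes"
   	"encoding/json"
   	"fmt"
   	"net/http"
   )

   func main() {
   	// Only the fields of interest are set; the daemon fills in the rest.
   	config := map[string]interface{}{
   		"Image": "ubuntu",
   		"Cmd":   []string{"date"},
   	}
   	body, _ := json.Marshal(config)

   	resp, err := http.Post("http://localhost:4243/containers/create", "application/json", bytes.NewReader(body))
   	if err != nil {
   		panic(err)
   	}
   	defer resp.Body.Close()

   	var out struct {
   		Id       string
   		Warnings []string
   	}
   	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
   		panic(err)
   	}
   	fmt.Println("created container", out.Id)
   }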
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.041847+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Dns": null, - "Image": "ubuntu", - "Volumes": {}, - "VolumesFrom": "", - "WorkingDir":"" - - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:01360", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {} - } - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 409: conflict between containers and images - :statuscode 500: server error - - -List processes running inside a container -***************************************** - -.. http:get:: /containers/(id)/top - - List processes running inside the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/top HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Titles":[ - "USER", - "PID", - "%CPU", - "%MEM", - "VSZ", - "RSS", - "TTY", - "STAT", - "START", - "TIME", - "COMMAND" - ], - "Processes":[ - ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], - ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] - ] - } - - :query ps_args: ps arguments to use (eg. aux) - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Inspect changes on a container's filesystem -******************************************* - -.. http:get:: /containers/(id)/changes - - Inspect changes on container ``id`` 's filesystem - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/changes HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } - ] - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Export a container -****************** - -.. http:get:: /containers/(id)/export - - Export the contents of container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/export HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Start a container -***************** - -.. http:post:: /containers/(id)/start - - Start the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/(id)/start HTTP/1.1 - Content-Type: application/json - - { - "Binds":["/tmp:/tmp"], - "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}] - } - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 204 No Content - Content-Type: text/plain - - :jsonparam hostConfig: the container's host configuration (optional) - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Stop a container -**************** - -.. http:post:: /containers/(id)/stop - - Stop the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/stop?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Restart a container -******************* - -.. http:post:: /containers/(id)/restart - - Restart the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/restart?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Kill a container -**************** - -.. http:post:: /containers/(id)/kill - - Kill the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/kill HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Attach to a container -********************* - -.. http:post:: /containers/(id)/attach - - Attach to the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query logs: 1/True/true or 0/False/false, return logs. Default false - :query stream: 1/True/true or 0/False/false, return stream. Default false - :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -Wait a container -**************** - -.. http:post:: /containers/(id)/wait - - Block until container ``id`` stops, then returns the exit code - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/wait HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"StatusCode":0} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Remove a container -******************* - -.. http:delete:: /containers/(id) - - Remove the container ``id`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /containers/16253994b7c4?v=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. 
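A small sketch of blocking on the wait endpoint from Go and reading the exit code. The container ID and the daemon address ``localhost:4243`` are placeholders.

.. code-block:: go

   package main

   import (
   	"encoding/json"
   	"fmt"
   	"net/http"
   )

   func main() {
   	// The call blocks until the container exits, then returns its status code.
   	resp, err := http.Post("http://localhost:4243/containers/16253994b7c4/wait", "application/json", nil)
   	if err != nil {
   		panic(err)
   	}
   	defer resp.Body.Close()

   	var result struct{ StatusCode int }
   	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
   		panic(err)
   	}
   	fmt.Println("container exited with status", result.StatusCode)
   }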
Default false - :statuscode 204: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -Copy files or folders from a container -************************************** - -.. http:post:: /containers/(id)/copy - - Copy files or folders of container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/4fa6e0f0c678/copy HTTP/1.1 - Content-Type: application/json - - { - "Resource":"test.txt" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -2.2 Images ----------- - -List Images -*********** - -.. http:get:: /images/(format) - - List images ``format`` could be json or viz (json default) - - **Example request**: - - .. sourcecode:: http - - GET /images/json?all=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Repository":"ubuntu", - "Tag":"precise", - "Id":"b750fe79269d", - "Created":1364102658, - "Size":24653, - "VirtualSize":180116135 - }, - { - "Repository":"ubuntu", - "Tag":"12.04", - "Id":"b750fe79269d", - "Created":1364102658, - "Size":24653, - "VirtualSize":180116135 - } - ] - - - **Example request**: - - .. sourcecode:: http - - GET /images/viz HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: text/plain - - digraph docker { - "d82cbacda43a" -> "074be284591f" - "1496068ca813" -> "08306dc45919" - "08306dc45919" -> "0e7893146ac2" - "b750fe79269d" -> "1496068ca813" - base -> "27cf78414709" [style=invis] - "f71189fff3de" -> "9a33b36209ed" - "27cf78414709" -> "b750fe79269d" - "0e7893146ac2" -> "d6434d954665" - "d6434d954665" -> "d82cbacda43a" - base -> "e9aa60c60128" [style=invis] - "074be284591f" -> "f71189fff3de" - "b750fe79269d" [label="b750fe79269d\nubuntu",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - "e9aa60c60128" [label="e9aa60c60128\ncentos",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - "9a33b36209ed" [label="9a33b36209ed\nfedora",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - base [style=invisible] - } - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create an image -*************** - -.. http:post:: /images/create - - Create an image, either by pull it from the registry or by importing it - - **Example request**: - - .. sourcecode:: http - - POST /images/create?fromImage=ubuntu HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :query fromImage: name of the image to pull - :query fromSrc: source to import, - means stdin - :query repo: repository - :query tag: tag - :query registry: the registry to pull from - :statuscode 200: no error - :statuscode 500: server error - - -Insert a file in an image -************************* - -.. http:post:: /images/(name)/insert - - Insert a file from ``url`` in the image ``name`` at ``path`` - - **Example request**: - - .. sourcecode:: http - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - .. 
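A short sketch of the copy endpoint from Go: it posts the ``Resource`` to fetch and writes the returned stream to a local file unchanged. The container ID, resource path, output filename and daemon address ``localhost:4243`` are placeholders.

.. code-block:: go

   package main

   import (
   	"bytes"
   	"io"
   	"net/http"
   	"os"
   )

   func main() {
   	// Ask the daemon for test.txt out of the container's filesystem.
   	body := bytes.NewBufferString(`{"Resource":"test.txt"}`)
   	resp, err := http.Post("http://localhost:4243/containers/4fa6e0f0c678/copy", "application/json", body)
   	if err != nil {
   		panic(err)
   	}
   	defer resp.Body.Close()

   	// The response body is the raw stream; write it straight to disk.
   	out, err := os.Create("copied.out")
   	if err != nil {
   		panic(err)
   	}
   	defer out.Close()
   	io.Copy(out, resp.Body)
   }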
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :statuscode 200: no error - :statuscode 500: server error - - -Inspect an image -**************** - -.. http:get:: /images/(name)/json - - Return low-level information on the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/centos/json HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "parent":"27cf784147099545", - "created":"2013-03-23T22:24:18.818426-07:00", - "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "container_config": - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":false, - "AttachStderr":false, - "PortSpecs":null, - "Tty":true, - "OpenStdin":true, - "StdinOnce":false, - "Env":null, - "Cmd": ["/bin/bash"] - ,"Dns":null, - "Image":"centos", - "Volumes":null, - "VolumesFrom":"", - "WorkingDir":"" - }, - "Size": 6824592 - } - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 409: conflict between containers and images - :statuscode 500: server error - - -Get the history of an image -*************************** - -.. http:get:: /images/(name)/history - - Return the history of the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/fedora/history HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id":"b750fe79269d", - "Created":1364102658, - "CreatedBy":"/bin/bash" - }, - { - "Id":"27cf78414709", - "Created":1364068391, - "CreatedBy":"" - } - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Push an image on the registry -***************************** - -.. http:post:: /images/(name)/push - - Push the image ``name`` on the registry - - **Example request**: - - .. sourcecode:: http - - POST /images/test/push HTTP/1.1 - {{ authConfig }} - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :query registry: the registry you wan to push, optional - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Tag an image into a repository -****************************** - -.. http:post:: /images/(name)/tag - - Tag the image ``name`` into a repository - - **Example request**: - - .. sourcecode:: http - - POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - - :query repo: The repository to tag in - :query force: 1/True/true or 0/False/false, default false - :statuscode 201: no error - :statuscode 400: bad parameter - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Remove an image -*************** - -.. http:delete:: /images/(name) - - Remove the image ``name`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /images/test HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-type: application/json - - [ - {"Untagged":"3e2f21a89f"}, - {"Deleted":"3e2f21a89f"}, - {"Deleted":"53b4f83ac9"} - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Search images -************* - -.. http:get:: /images/search - - Search for an image in the docker index - - **Example request**: - - .. sourcecode:: http - - GET /images/search?term=sshd HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Name":"cespare/sshd", - "Description":"" - }, - { - "Name":"johnfuller/sshd", - "Description":"" - }, - { - "Name":"dhrp/mongodb-sshd", - "Description":"" - } - ] - - :query term: term to search - :statuscode 200: no error - :statuscode 500: server error - - -2.3 Misc --------- - -Build an image from Dockerfile via stdin -**************************************** - -.. http:post:: /build - - Build an image from Dockerfile via stdin - - **Example request**: - - .. sourcecode:: http - - POST /build HTTP/1.1 - - {{ STREAM }} - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - {{ STREAM }} - - - The stream must be a tar archive compressed with one of the following algorithms: - identity (no compression), gzip, bzip2, xz. The archive must include a file called - `Dockerfile` at its root. It may include any number of other files, which will be - accessible in the build context (See the ADD build command). - - The Content-type header should be set to "application/tar". - - :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success - :query q: suppress verbose build output - :query nocache: do not use the cache when building the image - :statuscode 200: no error - :statuscode 500: server error - - -Check auth configuration -************************ - -.. http:post:: /auth - - Get the default username and email - - **Example request**: - - .. sourcecode:: http - - POST /auth HTTP/1.1 - Content-Type: application/json - - { - "username":"hannibal", - "password:"xxxx", - "email":"hannibal@a-team.com", - "serveraddress":"https://index.docker.io/v1/" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 204: no error - :statuscode 500: server error - - -Display system-wide information -******************************* - -.. http:get:: /info - - Display system-wide information - - **Example request**: - - .. sourcecode:: http - - GET /info HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Containers":11, - "Images":16, - "Debug":false, - "NFd": 11, - "NGoroutines":21, - "MemoryLimit":true, - "SwapLimit":false, - "IPv4Forwarding":true - } - - :statuscode 200: no error - :statuscode 500: server error - - -Show the docker version information -*********************************** - -.. http:get:: /version - - Show the docker version information - - **Example request**: - - .. sourcecode:: http - - GET /version HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Create a new image from a container's changes -********************************************* - -.. 
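A hedged sketch of driving the build endpoint from Go: it packs a one-line ``Dockerfile`` into an uncompressed (identity) tar archive, POSTs it with ``Content-Type: application/tar``, and streams the build output back. The daemon address ``localhost:4243``, the Dockerfile contents and the ``t=myrepo`` tag are placeholders.

.. code-block:: go

   package main

   import (
   	"archive/tar"
   	"bytes"
   	"io"
   	"net/http"
   	"os"
   )

   func main() {
   	dockerfile := []byte("FROM ubuntu\nRUN echo hello\n")

   	// Build an in-memory tar archive whose only entry is the Dockerfile.
   	buf := new(bytes.Buffer)
   	tw := tar.NewWriter(buf)
   	tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0644, Size: int64(len(dockerfile))})
   	tw.Write(dockerfile)
   	tw.Close()

   	// The archive is the build context; t= tags the resulting image on success.
   	resp, err := http.Post("http://localhost:4243/build?t=myrepo", "application/tar", buf)
   	if err != nil {
   		panic(err)
   	}
   	defer resp.Body.Close()
   	io.Copy(os.Stdout, resp.Body) // stream the build output
   }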
http:post:: /commit - - Create a new image from a container's changes - - **Example request**: - - .. sourcecode:: http - - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 - Content-Type: application/json - - { - "Cmd": ["cat", "/world"], - "PortSpecs":["22"] - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/vnd.docker.raw-stream - - {"Id":"596069db4bf5"} - - :query container: source container - :query repo: repository - :query tag: tag - :query m: commit message - :query author: author (eg. "John Hannibal Smith ") - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Monitor Docker's events -*********************** - -.. http:get:: /events - - Get events from docker, either in real time via streaming, or via polling (using `since`) - - **Example request**: - - .. sourcecode:: http - - GET /events?since=1374067924 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"create","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067924} - {"status":"start","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067924} - {"status":"stop","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067966} - {"status":"destroy","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067970} - - :query since: timestamp used for polling - :statuscode 200: no error - :statuscode 500: server error - - -3. Going further -================ - -3.1 Inside 'docker run' ------------------------ - -Here are the steps of 'docker run' : - -* Create the container -* If the status code is 404, it means the image doesn't exists: - * Try to pull it - * Then retry to create the container -* Start the container -* If you are not in detached mode: - * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 -* If in detached mode or only stdin is attached: - * Display the container's id - - -3.2 Hijacking -------------- - -In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. - -3.3 CORS Requests ------------------ - -To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. - -.. code-block:: bash - - docker -d -H="192.168.1.9:4243" --api-enable-cors - diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.5.rst b/docs/sources/reference/api/archive/docker_remote_api_v1.5.rst deleted file mode 100644 index d4440e4423..0000000000 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.5.rst +++ /dev/null @@ -1,1144 +0,0 @@ -:title: Remote API v1.5 -:description: API Documentation for Docker -:keywords: API, Docker, rcli, REST, documentation - -:orphan: - -====================== -Docker Remote API v1.5 -====================== - -.. contents:: Table of Contents - -1. Brief introduction -===================== - -- The Remote API is replacing rcli -- Default port in the docker daemon is 4243 -- The API tends to be REST, but for some complex commands, like attach or pull, the HTTP connection is hijacked to transport stdout stdin and stderr - -2. Endpoints -============ - -2.1 Containers --------------- - -List containers -*************** - -.. http:get:: /containers/json - - List containers - - **Example request**: - - .. 
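A brief sketch of the commit endpoint from Go, committing a container into a repository with an overriding run config and decoding the new image ``Id``. The container ID, repository name and daemon address ``localhost:4243`` are placeholders.

.. code-block:: go

   package main

   import (
   	"bytes"
   	"encoding/json"
   	"fmt"
   	"net/http"
   )

   func main() {
   	// The query selects the source container; the body overrides the image's run config.
   	url := "http://localhost:4243/commit?container=44c004db4b17&m=message&repo=myrepo"
   	config := bytes.NewBufferString(`{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}`)

   	resp, err := http.Post(url, "application/json", config)
   	if err != nil {
   		panic(err)
   	}
   	defer resp.Body.Close()

   	var out struct{ Id string }
   	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
   		panic(err)
   	}
   	fmt.Println("new image:", out.Id)
   }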
sourcecode:: http - - GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id": "8dfafdbc3a40", - "Image": "ubuntu:latest", - "Command": "echo 1", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "9cd87474be90", - "Image": "ubuntu:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "3176a2479c92", - "Image": "centos:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "4cb07b47f9fb", - "Image": "fedora:latest", - "Command": "echo 444444444444444444444444444444444", - "Created": 1367854152, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - } - ] - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :query limit: Show ``limit`` last created containers, include non-running ones. - :query since: Show only containers created since Id, include non-running ones. - :query before: Show only containers created before Id, include non-running ones. - :query size: 1/True/true or 0/False/false, Show the containers sizes - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - -Create a container -****************** - -.. http:post:: /containers/create - - Create a container - - **Example request**: - - .. sourcecode:: http - - POST /containers/create HTTP/1.1 - Content-Type: application/json - - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "Privileged": false, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "date" - ], - "Dns":null, - "Image":"ubuntu", - "Volumes":{}, - "VolumesFrom":"", - "WorkingDir":"" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/json - - { - "Id":"e90e34656806" - "Warnings":[] - } - - :jsonparam config: the container's configuration - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 406: impossible to attach (container not running) - :statuscode 500: server error - -Inspect a container -******************* - -.. http:get:: /containers/(id)/json - - Return low-level information on the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/json HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.041847+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Dns": null, - "Image": "ubuntu", - "Volumes": {}, - "VolumesFrom": "", - "WorkingDir":"" - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:01360", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {} - } - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - -List processes running inside a container -***************************************** - -.. http:get:: /containers/(id)/top - - List processes running inside the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/top HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Titles":[ - "USER", - "PID", - "%CPU", - "%MEM", - "VSZ", - "RSS", - "TTY", - "STAT", - "START", - "TIME", - "COMMAND" - ], - "Processes":[ - ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], - ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] - ] - } - - :query ps_args: ps arguments to use (eg. aux) - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - -Inspect changes on a container's filesystem -******************************************* - -.. http:get:: /containers/(id)/changes - - Inspect changes on container ``id`` 's filesystem - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/changes HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } - ] - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - -Export a container -****************** - -.. http:get:: /containers/(id)/export - - Export the contents of container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/export HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - -Start a container -***************** - -.. http:post:: /containers/(id)/start - - Start the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/(id)/start HTTP/1.1 - Content-Type: application/json - - { - "Binds":["/tmp:/tmp"], - "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}] - } - - **Example response**: - - .. 
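A compact sketch of decoding part of the inspect response from Go, picking out only the ``State`` fields of interest and ignoring the rest of the document. The container ID and daemon address ``localhost:4243`` are placeholders.

.. code-block:: go

   package main

   import (
   	"encoding/json"
   	"fmt"
   	"net/http"
   )

   func main() {
   	resp, err := http.Get("http://localhost:4243/containers/4fa6e0f0c678/json")
   	if err != nil {
   		panic(err)
   	}
   	defer resp.Body.Close()

   	// Decode only the fields we care about; unknown fields are skipped.
   	var info struct {
   		Id    string
   		State struct {
   			Running  bool
   			ExitCode int
   		}
   	}
   	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
   		panic(err)
   	}
   	fmt.Printf("%s running=%v exit=%d\n", info.Id, info.State.Running, info.State.ExitCode)
   }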
sourcecode:: http - - HTTP/1.1 204 No Content - Content-Type: text/plain - - :jsonparam hostConfig: the container's host configuration (optional) - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - -Stop a container -**************** - -.. http:post:: /containers/(id)/stop - - Stop the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/stop?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - -Restart a container -******************* - -.. http:post:: /containers/(id)/restart - - Restart the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/restart?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - -Kill a container -**************** - -.. http:post:: /containers/(id)/kill - - Kill the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/kill HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - -Attach to a container -********************* - -.. http:post:: /containers/(id)/attach - - Attach to the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query logs: 1/True/true or 0/False/false, return logs. Default false - :query stream: 1/True/true or 0/False/false, return stream. Default false - :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - -Wait a container -**************** - -.. http:post:: /containers/(id)/wait - - Block until container ``id`` stops, then returns the exit code - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/wait HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"StatusCode":0} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - -Remove a container -******************* - -.. http:delete:: /containers/(id) - - Remove the container ``id`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /containers/16253994b7c4?v=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. 
Default false - :statuscode 204: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - -Copy files or folders from a container -************************************** - -.. http:post:: /containers/(id)/copy - - Copy files or folders of container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/4fa6e0f0c678/copy HTTP/1.1 - Content-Type: application/json - - { - "Resource":"test.txt" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - -2.2 Images ----------- - -List Images -*********** - -.. http:get:: /images/(format) - - List images ``format`` could be json or viz (json default) - - **Example request**: - - .. sourcecode:: http - - GET /images/json?all=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Repository":"ubuntu", - "Tag":"precise", - "Id":"b750fe79269d", - "Created":1364102658, - "Size":24653, - "VirtualSize":180116135 - }, - { - "Repository":"ubuntu", - "Tag":"12.04", - "Id":"b750fe79269d", - "Created":1364102658, - "Size":24653, - "VirtualSize":180116135 - } - ] - - **Example request**: - - .. sourcecode:: http - - GET /images/viz HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: text/plain - - digraph docker { - "d82cbacda43a" -> "074be284591f" - "1496068ca813" -> "08306dc45919" - "08306dc45919" -> "0e7893146ac2" - "b750fe79269d" -> "1496068ca813" - base -> "27cf78414709" [style=invis] - "f71189fff3de" -> "9a33b36209ed" - "27cf78414709" -> "b750fe79269d" - "0e7893146ac2" -> "d6434d954665" - "d6434d954665" -> "d82cbacda43a" - base -> "e9aa60c60128" [style=invis] - "074be284591f" -> "f71189fff3de" - "b750fe79269d" [label="b750fe79269d\nubuntu",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - "e9aa60c60128" [label="e9aa60c60128\ncentos",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - "9a33b36209ed" [label="9a33b36209ed\nfedora",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - base [style=invisible] - } - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - -Create an image -*************** - -.. http:post:: /images/create - - Create an image, either by pull it from the registry or by importing it - - **Example request**: - - .. sourcecode:: http - - POST /images/create?fromImage=ubuntu HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - When using this endpoint to pull an image from the registry, - the ``X-Registry-Auth`` header can be used to include a - base64-encoded AuthConfig object. - - :query fromImage: name of the image to pull - :query fromSrc: source to import, - means stdin - :query repo: repository - :query tag: tag - :query registry: the registry to pull from - :statuscode 200: no error - :statuscode 500: server error - -Insert a file in an image -************************* - -.. http:post:: /images/(name)/insert - - Insert a file from ``url`` in the image ``name`` at ``path`` - - **Example request**: - - .. 
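A hedged sketch of attaching the ``X-Registry-Auth`` header from Go: an AuthConfig object is serialized to JSON and base64-encoded before the pull request is sent. The credentials mirror the ``/auth`` example and are placeholders, the daemon address ``localhost:4243`` is an assumption, and plain (non URL-safe) base64 is assumed here.

.. code-block:: go

   package main

   import (
   	"bufio"
   	"encoding/base64"
   	"encoding/json"
   	"fmt"
   	"net/http"
   )

   func main() {
   	// Placeholder credentials; in practice these come from the user's config.
   	auth := map[string]string{
   		"username":      "hannibal",
   		"password":      "xxxx",
   		"email":         "hannibal@a-team.com",
   		"serveraddress": "https://index.docker.io/v1/",
   	}
   	raw, _ := json.Marshal(auth)
   	header := base64.StdEncoding.EncodeToString(raw)

   	req, err := http.NewRequest("POST", "http://localhost:4243/images/create?fromImage=ubuntu", nil)
   	if err != nil {
   		panic(err)
   	}
   	req.Header.Set("X-Registry-Auth", header)

   	resp, err := http.DefaultClient.Do(req)
   	if err != nil {
   		panic(err)
   	}
   	defer resp.Body.Close()

   	scanner := bufio.NewScanner(resp.Body)
   	for scanner.Scan() {
   		fmt.Println(scanner.Text())
   	}
   }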
sourcecode:: http - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :statuscode 200: no error - :statuscode 500: server error - -Inspect an image -**************** - -.. http:get:: /images/(name)/json - - Return low-level information on the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/centos/json HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "parent":"27cf784147099545", - "created":"2013-03-23T22:24:18.818426-07:00", - "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "container_config": - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":false, - "AttachStderr":false, - "PortSpecs":null, - "Tty":true, - "OpenStdin":true, - "StdinOnce":false, - "Env":null, - "Cmd": ["/bin/bash"] - ,"Dns":null, - "Image":"centos", - "Volumes":null, - "VolumesFrom":"", - "WorkingDir":"" - }, - "Size": 6824592 - } - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - -Get the history of an image -*************************** - -.. http:get:: /images/(name)/history - - Return the history of the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/fedora/history HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id":"b750fe79269d", - "Created":1364102658, - "CreatedBy":"/bin/bash" - }, - { - "Id":"27cf78414709", - "Created":1364068391, - "CreatedBy":"" - } - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - -Push an image on the registry -***************************** - -.. http:post:: /images/(name)/push - - Push the image ``name`` on the registry - - **Example request**: - - .. sourcecode:: http - - POST /images/test/push HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - The ``X-Registry-Auth`` header can be used to include a - base64-encoded AuthConfig object. - - :query registry: the registry you wan to push, optional - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - -Tag an image into a repository -****************************** - -.. http:post:: /images/(name)/tag - - Tag the image ``name`` into a repository - - **Example request**: - - .. sourcecode:: http - - POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - - :query repo: The repository to tag in - :query force: 1/True/true or 0/False/false, default false - :statuscode 201: no error - :statuscode 400: bad parameter - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - -Remove an image -*************** - -.. http:delete:: /images/(name) - - Remove the image ``name`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /images/test HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-type: application/json - - [ - {"Untagged":"3e2f21a89f"}, - {"Deleted":"3e2f21a89f"}, - {"Deleted":"53b4f83ac9"} - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - -Search images -************* - -.. http:get:: /images/search - - Search for an image in the docker index - - **Example request**: - - .. sourcecode:: http - - GET /images/search?term=sshd HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Name":"cespare/sshd", - "Description":"" - }, - { - "Name":"johnfuller/sshd", - "Description":"" - }, - { - "Name":"dhrp/mongodb-sshd", - "Description":"" - } - ] - - :query term: term to search - :statuscode 200: no error - :statuscode 500: server error - -2.3 Misc --------- - -Build an image from Dockerfile via stdin -**************************************** - -.. http:post:: /build - - Build an image from Dockerfile via stdin - - **Example request**: - - .. sourcecode:: http - - POST /build HTTP/1.1 - - {{ STREAM }} - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - {{ STREAM }} - - The stream must be a tar archive compressed with one of the following algorithms: - identity (no compression), gzip, bzip2, xz. The archive must include a file called - `Dockerfile` at its root. It may include any number of other files, which will be - accessible in the build context (See the ADD build command). - - The Content-type header should be set to "application/tar". - - :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success - :query q: suppress verbose build output - :query nocache: do not use the cache when building the image - :query rm: remove intermediate containers after a successful build - :statuscode 200: no error - :statuscode 500: server error - -Check auth configuration -************************ - -.. http:post:: /auth - - Get the default username and email - - **Example request**: - - .. sourcecode:: http - - POST /auth HTTP/1.1 - Content-Type: application/json - - { - "username":"hannibal", - "password:"xxxx", - "email":"hannibal@a-team.com", - "serveraddress":"https://index.docker.io/v1/" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 204: no error - :statuscode 500: server error - -Display system-wide information -******************************* - -.. http:get:: /info - - Display system-wide information - - **Example request**: - - .. sourcecode:: http - - GET /info HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Containers":11, - "Images":16, - "Debug":false, - "NFd": 11, - "NGoroutines":21, - "MemoryLimit":true, - "SwapLimit":false, - "IPv4Forwarding":true - } - - :statuscode 200: no error - :statuscode 500: server error - -Show the docker version information -*********************************** - -.. http:get:: /version - - Show the docker version information - - **Example request**: - - .. sourcecode:: http - - GET /version HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" - } - - :statuscode 200: no error - :statuscode 500: server error - -Create a new image from a container's changes -********************************************* - -.. http:post:: /commit - - Create a new image from a container's changes - - **Example request**: - - .. sourcecode:: http - - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 - Content-Type: application/json - - { - "Cmd": ["cat", "/world"], - "PortSpecs":["22"] - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/vnd.docker.raw-stream - - {"Id":"596069db4bf5"} - - :query container: source container - :query repo: repository - :query tag: tag - :query m: commit message - :query author: author (eg. "John Hannibal Smith ") - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 500: server error - -Monitor Docker's events -*********************** - -.. http:get:: /events - - Get events from docker, either in real time via streaming, or via polling (using `since`) - - **Example request**: - - .. sourcecode:: http - - GET /events?since=1374067924 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"create","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067924} - {"status":"start","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067924} - {"status":"stop","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067966} - {"status":"destroy","id":"dfdf82bd3881","from":"ubuntu:latest","time":1374067970} - - :query since: timestamp used for polling - :statuscode 200: no error - :statuscode 500: server error - -3. Going further -================ - -3.1 Inside 'docker run' ------------------------ - -Here are the steps of 'docker run' : - -* Create the container -* If the status code is 404, it means the image doesn't exists: - * Try to pull it - * Then retry to create the container -* Start the container -* If you are not in detached mode: - * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 -* If in detached mode or only stdin is attached: - * Display the container's id - -3.2 Hijacking -------------- - -In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. - -3.3 CORS Requests ------------------ - -To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. - -.. code-block:: bash - - docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.6.rst b/docs/sources/reference/api/archive/docker_remote_api_v1.6.rst deleted file mode 100644 index cfc37084b8..0000000000 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.6.rst +++ /dev/null @@ -1,1282 +0,0 @@ -:title: Remote API v1.6 -:description: API Documentation for Docker -:keywords: API, Docker, rcli, REST, documentation - -:orphan: - -====================== -Docker Remote API v1.6 -====================== - -.. contents:: Table of Contents - -1. Brief introduction -===================== - -- The Remote API has replaced rcli -- The daemon listens on ``unix:///var/run/docker.sock``, but you can - :ref:`bind_docker`. 
-- The API tends to be REST, but for some complex commands, like - ``attach`` or ``pull``, the HTTP connection is hijacked to transport - ``stdout, stdin`` and ``stderr`` - -2. Endpoints -============ - -2.1 Containers --------------- - -List containers -*************** - -.. http:get:: /containers/json - - List containers - - **Example request**: - - .. sourcecode:: http - - GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id": "8dfafdbc3a40", - "Image": "base:latest", - "Command": "echo 1", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "9cd87474be90", - "Image": "base:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "3176a2479c92", - "Image": "base:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "4cb07b47f9fb", - "Image": "base:latest", - "Command": "echo 444444444444444444444444444444444", - "Created": 1367854152, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - } - ] - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :query limit: Show ``limit`` last created containers, include non-running ones. - :query since: Show only containers created since Id, include non-running ones. - :query before: Show only containers created before Id, include non-running ones. - :query size: 1/True/true or 0/False/false, Show the containers sizes - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create a container -****************** - -.. http:post:: /containers/create - - Create a container - - **Example request**: - - .. sourcecode:: http - - POST /containers/create HTTP/1.1 - Content-Type: application/json - - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "ExposedPorts":{}, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "date" - ], - "Dns":null, - "Image":"base", - "Volumes":{}, - "VolumesFrom":"", - "WorkingDir":"" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/json - - { - "Id":"e90e34656806" - "Warnings":[] - } - - :jsonparam config: the container's configuration - :query name: container name to use - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 406: impossible to attach (container not running) - :statuscode 500: server error - - **More Complex Example request, in 2 steps.** - **First, use create to expose a Private Port, which can be bound back to a Public Port at startup**: - - .. sourcecode:: http - - POST /containers/create HTTP/1.1 - Content-Type: application/json - - { - "Cmd":[ - "/usr/sbin/sshd","-D" - ], - "Image":"image-with-sshd", - "ExposedPorts":{"22/tcp":{}} - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/json - - { - "Id":"e90e34656806" - "Warnings":[] - } - - **Second, start (using the ID returned above) the image we just created, mapping the ssh port 22 to something on the host**: - - .. 
sourcecode:: http - - POST /containers/e90e34656806/start HTTP/1.1 - Content-Type: application/json - - { - "PortBindings": { "22/tcp": [{ "HostPort": "11022" }]} - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 No Content - Content-Type: text/plain; charset=utf-8 - Content-Length: 0 - - **Now you can ssh into your new container on port 11022.** - - - - -Inspect a container -******************* - -.. http:get:: /containers/(id)/json - - Return low-level information on the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/json HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.041847+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "ExposedPorts": {}, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Dns": null, - "Image": "base", - "Volumes": {}, - "VolumesFrom": "", - "WorkingDir":"" - - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:01360", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {} - } - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -List processes running inside a container -***************************************** - -.. http:get:: /containers/(id)/top - - List processes running inside the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/top HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Titles":[ - "USER", - "PID", - "%CPU", - "%MEM", - "VSZ", - "RSS", - "TTY", - "STAT", - "START", - "TIME", - "COMMAND" - ], - "Processes":[ - ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], - ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] - ] - } - - :query ps_args: ps arguments to use (eg. aux) - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Inspect changes on a container's filesystem -******************************************* - -.. http:get:: /containers/(id)/changes - - Inspect changes on container ``id`` 's filesystem - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/changes HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } - ] - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Export a container -****************** - -.. http:get:: /containers/(id)/export - - Export the contents of container ``id`` - - **Example request**: - - .. 
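The same two-step flow can be scripted; the sketch below creates a container exposing ``22/tcp`` and then starts it with the private port bound to host port ``11022``. The image name and daemon address ``localhost:4243`` are placeholders.

.. code-block:: go

   package main

   import (
   	"bytes"
   	"encoding/json"
   	"fmt"
   	"net/http"
   )

   const daemon = "http://localhost:4243"

   func main() {
   	// Step 1: create a container that exposes port 22/tcp.
   	create := bytes.NewBufferString(`{
   		"Cmd": ["/usr/sbin/sshd", "-D"],
   		"Image": "image-with-sshd",
   		"ExposedPorts": {"22/tcp": {}}
   	}`)
   	resp, err := http.Post(daemon+"/containers/create", "application/json", create)
   	if err != nil {
   		panic(err)
   	}
   	var out struct{ Id string }
   	json.NewDecoder(resp.Body).Decode(&out)
   	resp.Body.Close()

   	// Step 2: start it, binding the private port 22 back to host port 11022.
   	start := bytes.NewBufferString(`{"PortBindings": {"22/tcp": [{"HostPort": "11022"}]}}`)
   	resp, err = http.Post(daemon+"/containers/"+out.Id+"/start", "application/json", start)
   	if err != nil {
   		panic(err)
   	}
   	resp.Body.Close()
   	fmt.Println("started", out.Id, "with ssh on host port 11022")
   }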
sourcecode:: http - - GET /containers/4fa6e0f0c678/export HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Start a container -***************** - -.. http:post:: /containers/(id)/start - - Start the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/(id)/start HTTP/1.1 - Content-Type: application/json - - { - "Binds":["/tmp:/tmp"], - "LxcConf":{"lxc.utsname":"docker"}, - "ContainerIDFile": "", - "Privileged": false, - "PortBindings": {"22/tcp": [{HostIp:"", HostPort:""}]}, - "Links": [], - "PublishAllPorts": false - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 No Content - Content-Type: text/plain - - :jsonparam hostConfig: the container's host configuration (optional) - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Stop a container -**************** - -.. http:post:: /containers/(id)/stop - - Stop the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/stop?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Restart a container -******************* - -.. http:post:: /containers/(id)/restart - - Restart the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/restart?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Kill a container -**************** - -.. http:post:: /containers/(id)/kill - - Kill the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/kill HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query signal: Signal to send to the container (integer). When not set, SIGKILL is assumed and the call will waits for the container to exit. - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Attach to a container -********************* - -.. http:post:: /containers/(id)/attach - - Attach to the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query logs: 1/True/true or 0/False/false, return logs. Default false - :query stream: 1/True/true or 0/False/false, return stream. Default false - :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. 
Default false - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - **Stream details**: - - When using the TTY setting is enabled in - :http:post:`/containers/create`, the stream is the raw data - from the process PTY and client's stdin. When the TTY is - disabled, then the stream is multiplexed to separate stdout - and stderr. - - The format is a **Header** and a **Payload** (frame). - - **HEADER** - - The header will contain the information on which stream write - the stream (stdout or stderr). It also contain the size of - the associated frame encoded on the last 4 bytes (uint32). - - It is encoded on the first 8 bytes like this:: - - header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} - - ``STREAM_TYPE`` can be: - - - 0: stdin (will be writen on stdout) - - 1: stdout - - 2: stderr - - ``SIZE1, SIZE2, SIZE3, SIZE4`` are the 4 bytes of the uint32 size encoded as big endian. - - **PAYLOAD** - - The payload is the raw stream. - - **IMPLEMENTATION** - - The simplest way to implement the Attach protocol is the following: - - 1) Read 8 bytes - 2) chose stdout or stderr depending on the first byte - 3) Extract the frame size from the last 4 byets - 4) Read the extracted size and output it on the correct output - 5) Goto 1) - - - -Wait a container -**************** - -.. http:post:: /containers/(id)/wait - - Block until container ``id`` stops, then returns the exit code - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/wait HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"StatusCode":0} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Remove a container -******************* - -.. http:delete:: /containers/(id) - - Remove the container ``id`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /containers/16253994b7c4?v=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - :statuscode 204: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -Copy files or folders from a container -************************************** - -.. http:post:: /containers/(id)/copy - - Copy files or folders of container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/4fa6e0f0c678/copy HTTP/1.1 - Content-Type: application/json - - { - "Resource":"test.txt" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -2.2 Images ----------- - -List Images -*********** - -.. http:get:: /images/(format) - - List images ``format`` could be json or viz (json default) - - **Example request**: - - .. sourcecode:: http - - GET /images/json?all=0 HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Repository":"base", - "Tag":"ubuntu-12.10", - "Id":"b750fe79269d", - "Created":1364102658, - "Size":24653, - "VirtualSize":180116135 - }, - { - "Repository":"base", - "Tag":"ubuntu-quantal", - "Id":"b750fe79269d", - "Created":1364102658, - "Size":24653, - "VirtualSize":180116135 - } - ] - - - **Example request**: - - .. sourcecode:: http - - GET /images/viz HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: text/plain - - digraph docker { - "d82cbacda43a" -> "074be284591f" - "1496068ca813" -> "08306dc45919" - "08306dc45919" -> "0e7893146ac2" - "b750fe79269d" -> "1496068ca813" - base -> "27cf78414709" [style=invis] - "f71189fff3de" -> "9a33b36209ed" - "27cf78414709" -> "b750fe79269d" - "0e7893146ac2" -> "d6434d954665" - "d6434d954665" -> "d82cbacda43a" - base -> "e9aa60c60128" [style=invis] - "074be284591f" -> "f71189fff3de" - "b750fe79269d" [label="b750fe79269d\nbase",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - "e9aa60c60128" [label="e9aa60c60128\nbase2",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - "9a33b36209ed" [label="9a33b36209ed\ntest",shape=box,fillcolor="paleturquoise",style="filled,rounded"]; - base [style=invisible] - } - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create an image -*************** - -.. http:post:: /images/create - - Create an image, either by pull it from the registry or by importing it - - **Example request**: - - .. sourcecode:: http - - POST /images/create?fromImage=base HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - When using this endpoint to pull an image from the registry, - the ``X-Registry-Auth`` header can be used to include a - base64-encoded AuthConfig object. - - :query fromImage: name of the image to pull - :query fromSrc: source to import, - means stdin - :query repo: repository - :query tag: tag - :query registry: the registry to pull from - :statuscode 200: no error - :statuscode 500: server error - - -Insert a file in an image -************************* - -.. http:post:: /images/(name)/insert - - Insert a file from ``url`` in the image ``name`` at ``path`` - - **Example request**: - - .. sourcecode:: http - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :statuscode 200: no error - :statuscode 500: server error - - -Inspect an image -**************** - -.. http:get:: /images/(name)/json - - Return low-level information on the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/base/json HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "parent":"27cf784147099545", - "created":"2013-03-23T22:24:18.818426-07:00", - "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "container_config": - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":false, - "AttachStderr":false, - "ExposedPorts":{}, - "Tty":true, - "OpenStdin":true, - "StdinOnce":false, - "Env":null, - "Cmd": ["/bin/bash"] - ,"Dns":null, - "Image":"base", - "Volumes":null, - "VolumesFrom":"", - "WorkingDir":"" - }, - "Size": 6824592 - } - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Get the history of an image -*************************** - -.. http:get:: /images/(name)/history - - Return the history of the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/base/history HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id":"b750fe79269d", - "Created":1364102658, - "CreatedBy":"/bin/bash" - }, - { - "Id":"27cf78414709", - "Created":1364068391, - "CreatedBy":"" - } - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Push an image on the registry -***************************** - -.. http:post:: /images/(name)/push - - Push the image ``name`` on the registry - - **Example request**: - - .. sourcecode:: http - - POST /images/test/push HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - The ``X-Registry-Auth`` header can be used to include a - base64-encoded AuthConfig object. - - :query registry: the registry you wan to push, optional - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Tag an image into a repository -****************************** - -.. http:post:: /images/(name)/tag - - Tag the image ``name`` into a repository - - **Example request**: - - .. sourcecode:: http - - POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - - :query repo: The repository to tag in - :query force: 1/True/true or 0/False/false, default false - :statuscode 201: no error - :statuscode 400: bad parameter - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Remove an image -*************** - -.. http:delete:: /images/(name) - - Remove the image ``name`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /images/test HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-type: application/json - - [ - {"Untagged":"3e2f21a89f"}, - {"Deleted":"3e2f21a89f"}, - {"Deleted":"53b4f83ac9"} - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Search images -************* - -.. http:get:: /images/search - - Search for an image in the docker index - - **Example request**: - - .. sourcecode:: http - - GET /images/search?term=sshd HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Name":"cespare/sshd", - "Description":"" - }, - { - "Name":"johnfuller/sshd", - "Description":"" - }, - { - "Name":"dhrp/mongodb-sshd", - "Description":"" - } - ] - - :query term: term to search - :statuscode 200: no error - :statuscode 500: server error - - -2.3 Misc --------- - -Build an image from Dockerfile via stdin -**************************************** - -.. http:post:: /build - - Build an image from Dockerfile via stdin - - **Example request**: - - .. sourcecode:: http - - POST /build HTTP/1.1 - - {{ STREAM }} - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - {{ STREAM }} - - - The stream must be a tar archive compressed with one of the following algorithms: - identity (no compression), gzip, bzip2, xz. The archive must include a file called - `Dockerfile` at its root. It may include any number of other files, which will be - accessible in the build context (See the ADD build command). - - The Content-type header should be set to "application/tar". - - :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success - :query q: suppress verbose build output - :query nocache: do not use the cache when building the image - :statuscode 200: no error - :statuscode 500: server error - - -Check auth configuration -************************ - -.. http:post:: /auth - - Get the default username and email - - **Example request**: - - .. sourcecode:: http - - POST /auth HTTP/1.1 - Content-Type: application/json - - { - "username":"hannibal", - "password:"xxxx", - "email":"hannibal@a-team.com", - "serveraddress":"https://index.docker.io/v1/" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 204: no error - :statuscode 500: server error - - -Display system-wide information -******************************* - -.. http:get:: /info - - Display system-wide information - - **Example request**: - - .. sourcecode:: http - - GET /info HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Containers":11, - "Images":16, - "Debug":false, - "NFd": 11, - "NGoroutines":21, - "MemoryLimit":true, - "SwapLimit":false, - "IPv4Forwarding":true - } - - :statuscode 200: no error - :statuscode 500: server error - - -Show the docker version information -*********************************** - -.. http:get:: /version - - Show the docker version information - - **Example request**: - - .. sourcecode:: http - - GET /version HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Create a new image from a container's changes -********************************************* - -.. http:post:: /commit - - Create a new image from a container's changes - - **Example request**: - - .. sourcecode:: http - - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 - Content-Type: application/json - - { - "Cmd": ["cat", "/world"], - "ExposedPorts":{"22/tcp":{}} - } - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/vnd.docker.raw-stream - - {"Id":"596069db4bf5"} - - :query container: source container - :query repo: repository - :query tag: tag - :query m: commit message - :query author: author (eg. "John Hannibal Smith ") - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Monitor Docker's events -*********************** - -.. http:get:: /events - - Get events from docker, either in real time via streaming, or via polling (using `since`) - - **Example request**: - - .. sourcecode:: http - - GET /events?since=1374067924 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} - {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} - {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} - {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} - - :query since: timestamp used for polling - :statuscode 200: no error - :statuscode 500: server error - - -3. Going further -================ - -3.1 Inside 'docker run' ------------------------ - -Here are the steps of 'docker run' : - -* Create the container -* If the status code is 404, it means the image doesn't exists: - * Try to pull it - * Then retry to create the container -* Start the container -* If you are not in detached mode: - * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 -* If in detached mode or only stdin is attached: - * Display the container's id - - -3.2 Hijacking -------------- - -In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. - -3.3 CORS Requests ------------------ - -To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. - -.. code-block:: bash - - docker -d -H="192.168.1.9:4243" --api-enable-cors - diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.7.rst b/docs/sources/reference/api/archive/docker_remote_api_v1.7.rst deleted file mode 100644 index 7a4f688d8f..0000000000 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.7.rst +++ /dev/null @@ -1,1263 +0,0 @@ -:title: Remote API v1.7 -:description: API Documentation for Docker -:keywords: API, Docker, rcli, REST, documentation - -:orphan: - -====================== -Docker Remote API v1.7 -====================== - -.. contents:: Table of Contents - -1. Brief introduction -===================== - -- The Remote API has replaced rcli -- The daemon listens on ``unix:///var/run/docker.sock``, but you can - :ref:`bind_docker`. -- The API tends to be REST, but for some complex commands, like - ``attach`` or ``pull``, the HTTP connection is hijacked to transport - ``stdout, stdin`` and ``stderr`` - -2. Endpoints -============ - -2.1 Containers --------------- - -List containers -*************** - -.. http:get:: /containers/json - - List containers - - **Example request**: - - .. sourcecode:: http - - GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id": "8dfafdbc3a40", - "Image": "base:latest", - "Command": "echo 1", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "9cd87474be90", - "Image": "base:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "3176a2479c92", - "Image": "base:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "4cb07b47f9fb", - "Image": "base:latest", - "Command": "echo 444444444444444444444444444444444", - "Created": 1367854152, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - } - ] - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :query limit: Show ``limit`` last created containers, include non-running ones. - :query since: Show only containers created since Id, include non-running ones. - :query before: Show only containers created before Id, include non-running ones. - :query size: 1/True/true or 0/False/false, Show the containers sizes - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create a container -****************** - -.. http:post:: /containers/create - - Create a container - - **Example request**: - - .. sourcecode:: http - - POST /containers/create HTTP/1.1 - Content-Type: application/json - - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "date" - ], - "Dns":null, - "Image":"base", - "Volumes":{ - "/tmp": {} - }, - "VolumesFrom":"", - "WorkingDir":"", - "ExposedPorts":{ - "22/tcp": {} - } - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/json - - { - "Id":"e90e34656806" - "Warnings":[] - } - - :jsonparam config: the container's configuration - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 406: impossible to attach (container not running) - :statuscode 500: server error - - -Inspect a container -******************* - -.. http:get:: /containers/(id)/json - - Return low-level information on the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/json HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.041847+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Dns": null, - "Image": "base", - "Volumes": {}, - "VolumesFrom": "", - "WorkingDir":"" - - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:01360", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {} - } - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -List processes running inside a container -***************************************** - -.. http:get:: /containers/(id)/top - - List processes running inside the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/top HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Titles":[ - "USER", - "PID", - "%CPU", - "%MEM", - "VSZ", - "RSS", - "TTY", - "STAT", - "START", - "TIME", - "COMMAND" - ], - "Processes":[ - ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], - ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] - ] - } - - :query ps_args: ps arguments to use (eg. aux) - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Inspect changes on a container's filesystem -******************************************* - -.. http:get:: /containers/(id)/changes - - Inspect changes on container ``id`` 's filesystem - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/changes HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } - ] - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Export a container -****************** - -.. http:get:: /containers/(id)/export - - Export the contents of container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/export HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Start a container -***************** - -.. http:post:: /containers/(id)/start - - Start the container ``id`` - - **Example request**: - - .. 
sourcecode:: http - - POST /containers/(id)/start HTTP/1.1 - Content-Type: application/json - - { - "Binds":["/tmp:/tmp"], - "LxcConf":{"lxc.utsname":"docker"}, - "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, - "Privileged":false, - "PublishAllPorts":false - } - - Binds need to reference Volumes that were defined during container creation. - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 No Content - Content-Type: text/plain - - :jsonparam hostConfig: the container's host configuration (optional) - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Stop a container -**************** - -.. http:post:: /containers/(id)/stop - - Stop the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/stop?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Restart a container -******************* - -.. http:post:: /containers/(id)/restart - - Restart the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/restart?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Kill a container -**************** - -.. http:post:: /containers/(id)/kill - - Kill the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/kill HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query signal: Signal to send to the container (integer). When not set, SIGKILL is assumed and the call will waits for the container to exit. - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Attach to a container -********************* - -.. http:post:: /containers/(id)/attach - - Attach to the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query logs: 1/True/true or 0/False/false, return logs. Default false - :query stream: 1/True/true or 0/False/false, return stream. Default false - :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - **Stream details**: - - When using the TTY setting is enabled in - :http:post:`/containers/create`, the stream is the raw data - from the process PTY and client's stdin. When the TTY is - disabled, then the stream is multiplexed to separate stdout - and stderr. - - The format is a **Header** and a **Payload** (frame). - - **HEADER** - - The header will contain the information on which stream write - the stream (stdout or stderr). 
It also contain the size of - the associated frame encoded on the last 4 bytes (uint32). - - It is encoded on the first 8 bytes like this:: - - header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} - - ``STREAM_TYPE`` can be: - - - 0: stdin (will be writen on stdout) - - 1: stdout - - 2: stderr - - ``SIZE1, SIZE2, SIZE3, SIZE4`` are the 4 bytes of the uint32 size encoded as big endian. - - **PAYLOAD** - - The payload is the raw stream. - - **IMPLEMENTATION** - - The simplest way to implement the Attach protocol is the following: - - 1) Read 8 bytes - 2) chose stdout or stderr depending on the first byte - 3) Extract the frame size from the last 4 byets - 4) Read the extracted size and output it on the correct output - 5) Goto 1) - - - -Wait a container -**************** - -.. http:post:: /containers/(id)/wait - - Block until container ``id`` stops, then returns the exit code - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/wait HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"StatusCode":0} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Remove a container -******************* - -.. http:delete:: /containers/(id) - - Remove the container ``id`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /containers/16253994b7c4?v=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - :statuscode 204: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -Copy files or folders from a container -************************************** - -.. http:post:: /containers/(id)/copy - - Copy files or folders of container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/4fa6e0f0c678/copy HTTP/1.1 - Content-Type: application/json - - { - "Resource":"test.txt" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -2.2 Images ----------- - -List Images -*********** - -.. http:get:: /images/json - - **Example request**: - - .. sourcecode:: http - - GET /images/json?all=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "RepoTags": [ - "ubuntu:12.04", - "ubuntu:precise", - "ubuntu:latest" - ], - "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", - "Created": 1365714795, - "Size": 131506275, - "VirtualSize": 131506275 - }, - { - "RepoTags": [ - "ubuntu:12.10", - "ubuntu:quantal" - ], - "ParentId": "27cf784147099545", - "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Created": 1364102658, - "Size": 24653, - "VirtualSize": 180116135 - } - ] - - -Create an image -*************** - -.. http:post:: /images/create - - Create an image, either by pull it from the registry or by importing it - - **Example request**: - - .. sourcecode:: http - - POST /images/create?fromImage=base HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... 
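A minimal client-side sketch of the pull flow shown above, in Go, assuming the daemon has been bound to TCP (for example ``-H="127.0.0.1:4243"``) and that each progress object arrives on its own line; the address and the line-based framing are illustrative assumptions, not guarantees of the API.

.. code-block:: go

    package main

    import (
        "bufio"
        "fmt"
        "net/http"
    )

    func main() {
        // POST /images/create?fromImage=base starts a pull; the daemon
        // streams JSON status objects back on the response body.
        resp, err := http.Post("http://127.0.0.1:4243/images/create?fromImage=base", "text/plain", nil)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        // Print each streamed object as it arrives (assumed newline-delimited).
        scanner := bufio.NewScanner(resp.Body)
        for scanner.Scan() {
            fmt.Println(scanner.Text())
        }
        if err := scanner.Err(); err != nil {
            panic(err)
        }
    }

As the example response above shows, an ``{"error": ...}`` object can appear in the stream even though the HTTP status code is 200, so a client should watch for it rather than rely on the status code alone.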
- - When using this endpoint to pull an image from the registry, - the ``X-Registry-Auth`` header can be used to include a - base64-encoded AuthConfig object. - - :query fromImage: name of the image to pull - :query fromSrc: source to import, - means stdin - :query repo: repository - :query tag: tag - :query registry: the registry to pull from - :reqheader X-Registry-Auth: base64-encoded AuthConfig object - :statuscode 200: no error - :statuscode 500: server error - - - -Insert a file in an image -************************* - -.. http:post:: /images/(name)/insert - - Insert a file from ``url`` in the image ``name`` at ``path`` - - **Example request**: - - .. sourcecode:: http - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :statuscode 200: no error - :statuscode 500: server error - - -Inspect an image -**************** - -.. http:get:: /images/(name)/json - - Return low-level information on the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/base/json HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "parent":"27cf784147099545", - "created":"2013-03-23T22:24:18.818426-07:00", - "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "container_config": - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":false, - "AttachStderr":false, - "PortSpecs":null, - "Tty":true, - "OpenStdin":true, - "StdinOnce":false, - "Env":null, - "Cmd": ["/bin/bash"] - ,"Dns":null, - "Image":"base", - "Volumes":null, - "VolumesFrom":"", - "WorkingDir":"" - }, - "Size": 6824592 - } - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Get the history of an image -*************************** - -.. http:get:: /images/(name)/history - - Return the history of the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/base/history HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id":"b750fe79269d", - "Created":1364102658, - "CreatedBy":"/bin/bash" - }, - { - "Id":"27cf78414709", - "Created":1364068391, - "CreatedBy":"" - } - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Push an image on the registry -***************************** - -.. http:post:: /images/(name)/push - - Push the image ``name`` on the registry - - **Example request**: - - .. sourcecode:: http - - POST /images/test/push HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - - :query registry: the registry you wan to push, optional - :reqheader X-Registry-Auth: include a base64-encoded AuthConfig object. - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Tag an image into a repository -****************************** - -.. http:post:: /images/(name)/tag - - Tag the image ``name`` into a repository - - **Example request**: - - .. 
sourcecode:: http - - POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - - :query repo: The repository to tag in - :query force: 1/True/true or 0/False/false, default false - :statuscode 201: no error - :statuscode 400: bad parameter - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Remove an image -*************** - -.. http:delete:: /images/(name) - - Remove the image ``name`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /images/test HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-type: application/json - - [ - {"Untagged":"3e2f21a89f"}, - {"Deleted":"3e2f21a89f"}, - {"Deleted":"53b4f83ac9"} - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Search images -************* - -.. http:get:: /images/search - - Search for an image in the docker index. - - .. note:: - - The response keys have changed from API v1.6 to reflect the JSON - sent by the registry server to the docker daemon's request. - - **Example request**: - - .. sourcecode:: http - - GET /images/search?term=sshd HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "wma55/u1210sshd", - "star_count": 0 - }, - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "jdswinbank/sshd", - "star_count": 0 - }, - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "vgauthier/sshd", - "star_count": 0 - } - ... - ] - - :query term: term to search - :statuscode 200: no error - :statuscode 500: server error - - -2.3 Misc --------- - -Build an image from Dockerfile via stdin -**************************************** - -.. http:post:: /build - - Build an image from Dockerfile via stdin - - **Example request**: - - .. sourcecode:: http - - POST /build HTTP/1.1 - - {{ STREAM }} - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {{ STREAM }} - - The stream must be a tar archive compressed with one of the - following algorithms: identity (no compression), gzip, bzip2, - xz. - - The archive must include a file called ``Dockerfile`` at its - root. It may include any number of other files, which will be - accessible in the build context (See the :ref:`ADD build command - `). - - :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success - :query q: suppress verbose build output - :query nocache: do not use the cache when building the image - :reqheader Content-type: should be set to ``"application/tar"``. - :statuscode 200: no error - :statuscode 500: server error - - - -Check auth configuration -************************ - -.. http:post:: /auth - - Get the default username and email - - **Example request**: - - .. sourcecode:: http - - POST /auth HTTP/1.1 - Content-Type: application/json - - { - "username":"hannibal", - "password:"xxxx", - "email":"hannibal@a-team.com", - "serveraddress":"https://index.docker.io/v1/" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 204: no error - :statuscode 500: server error - - -Display system-wide information -******************************* - -.. 
http:get:: /info - - Display system-wide information - - **Example request**: - - .. sourcecode:: http - - GET /info HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Containers":11, - "Images":16, - "Debug":false, - "NFd": 11, - "NGoroutines":21, - "MemoryLimit":true, - "SwapLimit":false, - "IPv4Forwarding":true - } - - :statuscode 200: no error - :statuscode 500: server error - - -Show the docker version information -*********************************** - -.. http:get:: /version - - Show the docker version information - - **Example request**: - - .. sourcecode:: http - - GET /version HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Create a new image from a container's changes -********************************************* - -.. http:post:: /commit - - Create a new image from a container's changes - - **Example request**: - - .. sourcecode:: http - - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/vnd.docker.raw-stream - - {"Id":"596069db4bf5"} - - :query container: source container - :query repo: repository - :query tag: tag - :query m: commit message - :query author: author (eg. "John Hannibal Smith ") - :query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Monitor Docker's events -*********************** - -.. http:get:: /events - - Get events from docker, either in real time via streaming, or via polling (using `since`) - - **Example request**: - - .. sourcecode:: http - - GET /events?since=1374067924 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} - {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} - {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} - {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} - - :query since: timestamp used for polling - :statuscode 200: no error - :statuscode 500: server error - -Get a tarball containing all images and tags in a repository -************************************************************ - -.. http:get:: /images/(name)/get - - Get a tarball containing all images and metadata for the repository specified by ``name``. - - **Example request** - - .. sourcecode:: http - - GET /images/ubuntu/get - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/x-tar - - Binary data stream - :statuscode 200: no error - :statuscode 500: server error - -Load a tarball with a set of images and tags into docker -******************************************************** - -.. http:post:: /images/load - - Load a set of images and tags into the docker repository. - - **Example request** - - .. sourcecode:: http - - POST /images/load - - Tarball in body - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 500: server error - -3. 
Going further -================ - -3.1 Inside 'docker run' ------------------------ - -Here are the steps of 'docker run' : - -* Create the container -* If the status code is 404, it means the image doesn't exists: - * Try to pull it - * Then retry to create the container -* Start the container -* If you are not in detached mode: - * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 -* If in detached mode or only stdin is attached: - * Display the container's id - - -3.2 Hijacking -------------- - -In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. - -3.3 CORS Requests ------------------ - -To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. - -.. code-block:: bash - - docker -d -H="192.168.1.9:4243" --api-enable-cors - diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.8.rst b/docs/sources/reference/api/archive/docker_remote_api_v1.8.rst deleted file mode 100644 index 4f1b266bb6..0000000000 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.8.rst +++ /dev/null @@ -1,1295 +0,0 @@ -:title: Remote API v1.8 -:description: API Documentation for Docker -:keywords: API, Docker, rcli, REST, documentation - -:orphan: - -====================== -Docker Remote API v1.8 -====================== - -.. contents:: Table of Contents - -1. Brief introduction -===================== - -- The Remote API has replaced rcli -- The daemon listens on ``unix:///var/run/docker.sock``, but you can - :ref:`bind_docker`. -- The API tends to be REST, but for some complex commands, like - ``attach`` or ``pull``, the HTTP connection is hijacked to transport - ``stdout, stdin`` and ``stderr`` - -2. Endpoints -============ - -2.1 Containers --------------- - -List containers -*************** - -.. http:get:: /containers/json - - List containers - - **Example request**: - - .. sourcecode:: http - - GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id": "8dfafdbc3a40", - "Image": "base:latest", - "Command": "echo 1", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "9cd87474be90", - "Image": "base:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "3176a2479c92", - "Image": "base:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "4cb07b47f9fb", - "Image": "base:latest", - "Command": "echo 444444444444444444444444444444444", - "Created": 1367854152, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - } - ] - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :query limit: Show ``limit`` last created containers, include non-running ones. - :query since: Show only containers created since Id, include non-running ones. - :query before: Show only containers created before Id, include non-running ones. 
- :query size: 1/True/true or 0/False/false, Show the containers sizes - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create a container -****************** - -.. http:post:: /containers/create - - Create a container - - **Example request**: - - .. sourcecode:: http - - POST /containers/create HTTP/1.1 - Content-Type: application/json - - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "CpuShares":0, - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "date" - ], - "Dns":null, - "Image":"base", - "Volumes":{ - "/tmp": {} - }, - "VolumesFrom":"", - "WorkingDir":"", - "ExposedPorts":{ - "22/tcp": {} - } - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/json - - { - "Id":"e90e34656806" - "Warnings":[] - } - - :jsonparam Hostname: Container host name - :jsonparam User: Username or UID - :jsonparam Memory: Memory Limit in bytes - :jsonparam CpuShares: CPU shares (relative weight) - :jsonparam AttachStdin: 1/True/true or 0/False/false, attach to standard input. Default false - :jsonparam AttachStdout: 1/True/true or 0/False/false, attach to standard output. Default false - :jsonparam AttachStderr: 1/True/true or 0/False/false, attach to standard error. Default false - :jsonparam Tty: 1/True/true or 0/False/false, allocate a pseudo-tty. Default false - :jsonparam OpenStdin: 1/True/true or 0/False/false, keep stdin open even if not attached. Default false - :query name: Assign the specified name to the container. Must match ``/?[a-zA-Z0-9_-]+``. - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 406: impossible to attach (container not running) - :statuscode 500: server error - - -Inspect a container -******************* - -.. http:get:: /containers/(id)/json - - Return low-level information on the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/json HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.041847+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Dns": null, - "Image": "base", - "Volumes": {}, - "VolumesFrom": "", - "WorkingDir":"" - - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:01360", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {}, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49153" - } - ] - }, - "Links": null, - "PublishAllPorts": false - } - } - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -List processes running inside a container -***************************************** - -.. http:get:: /containers/(id)/top - - List processes running inside the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/top HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Titles":[ - "USER", - "PID", - "%CPU", - "%MEM", - "VSZ", - "RSS", - "TTY", - "STAT", - "START", - "TIME", - "COMMAND" - ], - "Processes":[ - ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], - ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] - ] - } - - :query ps_args: ps arguments to use (eg. aux) - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Inspect changes on a container's filesystem -******************************************* - -.. http:get:: /containers/(id)/changes - - Inspect changes on container ``id`` 's filesystem - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/changes HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } - ] - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Export a container -****************** - -.. http:get:: /containers/(id)/export - - Export the contents of container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/export HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Start a container -***************** - -.. http:post:: /containers/(id)/start - - Start the container ``id`` - - **Example request**: - - .. 
sourcecode:: http - - POST /containers/(id)/start HTTP/1.1 - Content-Type: application/json - - { - "Binds":["/tmp:/tmp"], - "LxcConf":{"lxc.utsname":"docker"}, - "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, - "PublishAllPorts":false, - "Privileged":false - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 No Content - Content-Type: text/plain - - :jsonparam Binds: Create a bind mount to a directory or file with [host-path]:[container-path]:[rw|ro]. If a directory "container-path" is missing, then docker creates a new volume. - :jsonparam LxcConf: Map of custom lxc options - :jsonparam PortBindings: Expose ports from the container, optionally publishing them via the HostPort flag - :jsonparam PublishAllPorts: 1/True/true or 0/False/false, publish all exposed ports to the host interfaces. Default false - :jsonparam Privileged: 1/True/true or 0/False/false, give extended privileges to this container. Default false - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Stop a container -**************** - -.. http:post:: /containers/(id)/stop - - Stop the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/stop?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Restart a container -******************* - -.. http:post:: /containers/(id)/restart - - Restart the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/restart?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Kill a container -**************** - -.. http:post:: /containers/(id)/kill - - Kill the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/kill HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query signal: Signal to send to the container (integer). When not set, SIGKILL is assumed and the call will waits for the container to exit. - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Attach to a container -********************* - -.. http:post:: /containers/(id)/attach - - Attach to the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query logs: 1/True/true or 0/False/false, return logs. Default false - :query stream: 1/True/true or 0/False/false, return stream. Default false - :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. 
Default false - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - **Stream details**: - - When using the TTY setting is enabled in - :http:post:`/containers/create`, the stream is the raw data - from the process PTY and client's stdin. When the TTY is - disabled, then the stream is multiplexed to separate stdout - and stderr. - - The format is a **Header** and a **Payload** (frame). - - **HEADER** - - The header will contain the information on which stream write - the stream (stdout or stderr). It also contain the size of - the associated frame encoded on the last 4 bytes (uint32). - - It is encoded on the first 8 bytes like this:: - - header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} - - ``STREAM_TYPE`` can be: - - - 0: stdin (will be writen on stdout) - - 1: stdout - - 2: stderr - - ``SIZE1, SIZE2, SIZE3, SIZE4`` are the 4 bytes of the uint32 size encoded as big endian. - - **PAYLOAD** - - The payload is the raw stream. - - **IMPLEMENTATION** - - The simplest way to implement the Attach protocol is the following: - - 1) Read 8 bytes - 2) chose stdout or stderr depending on the first byte - 3) Extract the frame size from the last 4 byets - 4) Read the extracted size and output it on the correct output - 5) Goto 1) - - - -Wait a container -**************** - -.. http:post:: /containers/(id)/wait - - Block until container ``id`` stops, then returns the exit code - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/wait HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"StatusCode":0} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Remove a container -******************* - -.. http:delete:: /containers/(id) - - Remove the container ``id`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /containers/16253994b7c4?v=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - :statuscode 204: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -Copy files or folders from a container -************************************** - -.. http:post:: /containers/(id)/copy - - Copy files or folders of container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/4fa6e0f0c678/copy HTTP/1.1 - Content-Type: application/json - - { - "Resource":"test.txt" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -2.2 Images ----------- - -List Images -*********** - -.. http:get:: /images/json - - **Example request**: - - .. sourcecode:: http - - GET /images/json?all=0 HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "RepoTags": [ - "ubuntu:12.04", - "ubuntu:precise", - "ubuntu:latest" - ], - "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", - "Created": 1365714795, - "Size": 131506275, - "VirtualSize": 131506275 - }, - { - "RepoTags": [ - "ubuntu:12.10", - "ubuntu:quantal" - ], - "ParentId": "27cf784147099545", - "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Created": 1364102658, - "Size": 24653, - "VirtualSize": 180116135 - } - ] - - -Create an image -*************** - -.. http:post:: /images/create - - Create an image, either by pull it from the registry or by importing it - - **Example request**: - - .. sourcecode:: http - - POST /images/create?fromImage=base HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} - {"error":"Invalid..."} - ... - - When using this endpoint to pull an image from the registry, - the ``X-Registry-Auth`` header can be used to include a - base64-encoded AuthConfig object. - - :query fromImage: name of the image to pull - :query fromSrc: source to import, - means stdin - :query repo: repository - :query tag: tag - :query registry: the registry to pull from - :reqheader X-Registry-Auth: base64-encoded AuthConfig object - :statuscode 200: no error - :statuscode 500: server error - - - -Insert a file in an image -************************* - -.. http:post:: /images/(name)/insert - - Insert a file from ``url`` in the image ``name`` at ``path`` - - **Example request**: - - .. sourcecode:: http - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}} - {"error":"Invalid..."} - ... - - :statuscode 200: no error - :statuscode 500: server error - - -Inspect an image -**************** - -.. http:get:: /images/(name)/json - - Return low-level information on the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/base/json HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "parent":"27cf784147099545", - "created":"2013-03-23T22:24:18.818426-07:00", - "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "container_config": - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":false, - "AttachStderr":false, - "PortSpecs":null, - "Tty":true, - "OpenStdin":true, - "StdinOnce":false, - "Env":null, - "Cmd": ["/bin/bash"] - ,"Dns":null, - "Image":"base", - "Volumes":null, - "VolumesFrom":"", - "WorkingDir":"" - }, - "Size": 6824592 - } - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Get the history of an image -*************************** - -.. http:get:: /images/(name)/history - - Return the history of the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/base/history HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id":"b750fe79269d", - "Created":1364102658, - "CreatedBy":"/bin/bash" - }, - { - "Id":"27cf78414709", - "Created":1364068391, - "CreatedBy":"" - } - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Push an image on the registry -***************************** - -.. http:post:: /images/(name)/push - - Push the image ``name`` on the registry - - **Example request**: - - .. sourcecode:: http - - POST /images/test/push HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}} - {"error":"Invalid..."} - ... - - :query registry: the registry you wan to push, optional - :reqheader X-Registry-Auth: include a base64-encoded AuthConfig object. - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Tag an image into a repository -****************************** - -.. http:post:: /images/(name)/tag - - Tag the image ``name`` into a repository - - **Example request**: - - .. sourcecode:: http - - POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - - :query repo: The repository to tag in - :query force: 1/True/true or 0/False/false, default false - :statuscode 201: no error - :statuscode 400: bad parameter - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Remove an image -*************** - -.. http:delete:: /images/(name) - - Remove the image ``name`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /images/test HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-type: application/json - - [ - {"Untagged":"3e2f21a89f"}, - {"Deleted":"3e2f21a89f"}, - {"Deleted":"53b4f83ac9"} - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Search images -************* - -.. http:get:: /images/search - - Search for an image in the docker index. - - .. note:: - - The response keys have changed from API v1.6 to reflect the JSON - sent by the registry server to the docker daemon's request. - - **Example request**: - - .. sourcecode:: http - - GET /images/search?term=sshd HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "wma55/u1210sshd", - "star_count": 0 - }, - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "jdswinbank/sshd", - "star_count": 0 - }, - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "vgauthier/sshd", - "star_count": 0 - } - ... - ] - - :query term: term to search - :statuscode 200: no error - :statuscode 500: server error - - -2.3 Misc --------- - -Build an image from Dockerfile via stdin -**************************************** - -.. http:post:: /build - - Build an image from Dockerfile via stdin - - **Example request**: - - .. sourcecode:: http - - POST /build HTTP/1.1 - - {{ STREAM }} - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"stream":"Step 1..."} - {"stream":"..."} - {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}} - - - The stream must be a tar archive compressed with one of the - following algorithms: identity (no compression), gzip, bzip2, - xz. - - The archive must include a file called ``Dockerfile`` at its - root. It may include any number of other files, which will be - accessible in the build context (See the :ref:`ADD build command - `). - - :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success - :query q: suppress verbose build output - :query nocache: do not use the cache when building the image - :reqheader Content-type: should be set to ``"application/tar"``. - :reqheader X-Registry-Auth: base64-encoded AuthConfig object - :statuscode 200: no error - :statuscode 500: server error - - - -Check auth configuration -************************ - -.. http:post:: /auth - - Get the default username and email - - **Example request**: - - .. sourcecode:: http - - POST /auth HTTP/1.1 - Content-Type: application/json - - { - "username":"hannibal", - "password:"xxxx", - "email":"hannibal@a-team.com", - "serveraddress":"https://index.docker.io/v1/" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 204: no error - :statuscode 500: server error - - -Display system-wide information -******************************* - -.. http:get:: /info - - Display system-wide information - - **Example request**: - - .. sourcecode:: http - - GET /info HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Containers":11, - "Images":16, - "Debug":false, - "NFd": 11, - "NGoroutines":21, - "MemoryLimit":true, - "SwapLimit":false, - "IPv4Forwarding":true - } - - :statuscode 200: no error - :statuscode 500: server error - - -Show the docker version information -*********************************** - -.. http:get:: /version - - Show the docker version information - - **Example request**: - - .. sourcecode:: http - - GET /version HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Create a new image from a container's changes -********************************************* - -.. http:post:: /commit - - Create a new image from a container's changes - - **Example request**: - - .. sourcecode:: http - - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/vnd.docker.raw-stream - - {"Id":"596069db4bf5"} - - :query container: source container - :query repo: repository - :query tag: tag - :query m: commit message - :query author: author (eg. "John Hannibal Smith ") - :query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Monitor Docker's events -*********************** - -.. http:get:: /events - - Get events from docker, either in real time via streaming, or via polling (using `since`) - - **Example request**: - - .. 
sourcecode:: http - - GET /events?since=1374067924 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} - {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} - {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} - {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} - - :query since: timestamp used for polling - :statuscode 200: no error - :statuscode 500: server error - -Get a tarball containing all images and tags in a repository -************************************************************ - -.. http:get:: /images/(name)/get - - Get a tarball containing all images and metadata for the repository specified by ``name``. - - **Example request** - - .. sourcecode:: http - - GET /images/ubuntu/get - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/x-tar - - Binary data stream - - :statuscode 200: no error - :statuscode 500: server error - -Load a tarball with a set of images and tags into docker -******************************************************** - -.. http:post:: /images/load - - Load a set of images and tags into the docker repository. - - **Example request** - - .. sourcecode:: http - - POST /images/load - - Tarball in body - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 500: server error - -3. Going further -================ - -3.1 Inside 'docker run' ------------------------ - -Here are the steps of 'docker run' : - -* Create the container -* If the status code is 404, it means the image doesn't exists: - * Try to pull it - * Then retry to create the container -* Start the container -* If you are not in detached mode: - * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 -* If in detached mode or only stdin is attached: - * Display the container's id - - -3.2 Hijacking -------------- - -In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. - -3.3 CORS Requests ------------------ - -To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. - -.. code-block:: bash - - docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/docker_io_accounts_api.rst b/docs/sources/reference/api/docker_io_accounts_api.rst deleted file mode 100644 index 1ce75ca738..0000000000 --- a/docs/sources/reference/api/docker_io_accounts_api.rst +++ /dev/null @@ -1,306 +0,0 @@ -:title: docker.io Accounts API -:description: API Documentation for docker.io accounts. -:keywords: API, Docker, accounts, REST, documentation - - -====================== -docker.io Accounts API -====================== - - -1. Endpoints -============ - - -1.1 Get a single user -^^^^^^^^^^^^^^^^^^^^^ - -.. http:get:: /api/v1.1/users/:username/ - - Get profile info for the specified user. - - :param username: username of the user whose profile info is being requested. - - :reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token. - - :statuscode 200: success, user data returned. - :statuscode 401: authentication error. 
- :statuscode 403: permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have ``profile_read`` scope. - :statuscode 404: the specified username does not exist. - - **Example request**: - - .. sourcecode:: http - - GET /api/v1.1/users/janedoe/ HTTP/1.1 - Host: www.docker.io - Accept: application/json - Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ= - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id": 2, - "username": "janedoe", - "url": "https://www.docker.io/api/v1.1/users/janedoe/", - "date_joined": "2014-02-12T17:58:01.431312Z", - "type": "User", - "full_name": "Jane Doe", - "location": "San Francisco, CA", - "company": "Success, Inc.", - "profile_url": "https://docker.io/", - "gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm" - "email": "jane.doe@example.com", - "is_active": true - } - - -1.2 Update a single user -^^^^^^^^^^^^^^^^^^^^^^^^ - -.. http:patch:: /api/v1.1/users/:username/ - - Update profile info for the specified user. - - :param username: username of the user whose profile info is being updated. - - :jsonparam string full_name: (optional) the new name of the user. - :jsonparam string location: (optional) the new location. - :jsonparam string company: (optional) the new company of the user. - :jsonparam string profile_url: (optional) the new profile url. - :jsonparam string gravatar_email: (optional) the new Gravatar email address. - - :reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token. - :reqheader Content-Type: MIME Type of post data. JSON, url-encoded form data, etc. - - :statuscode 200: success, user data updated. - :statuscode 400: post data validation error. - :statuscode 401: authentication error. - :statuscode 403: permission error, authenticated user must be the user whose data is being updated, OAuth access tokens must have ``profile_write`` scope. - :statuscode 404: the specified username does not exist. - - **Example request**: - - .. sourcecode:: http - - PATCH /api/v1.1/users/janedoe/ HTTP/1.1 - Host: www.docker.io - Accept: application/json - Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ= - - { - "location": "Private Island", - "profile_url": "http://janedoe.com/", - "company": "Retired", - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id": 2, - "username": "janedoe", - "url": "https://www.docker.io/api/v1.1/users/janedoe/", - "date_joined": "2014-02-12T17:58:01.431312Z", - "type": "User", - "full_name": "Jane Doe", - "location": "Private Island", - "company": "Retired", - "profile_url": "http://janedoe.com/", - "gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm" - "email": "jane.doe@example.com", - "is_active": true - } - - -1.3 List email addresses for a user -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. http:get:: /api/v1.1/users/:username/emails/ - - List email info for the specified user. - - :param username: username of the user whose profile info is being updated. - - :reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token - - :statuscode 200: success, user data updated. - :statuscode 401: authentication error. - :statuscode 403: permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have ``email_read`` scope. 
- :statuscode 404: the specified username does not exist. - - **Example request**: - - .. sourcecode:: http - - GET /api/v1.1/users/janedoe/emails/ HTTP/1.1 - Host: www.docker.io - Accept: application/json - Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "email": "jane.doe@example.com", - "verified": true, - "primary": true - } - ] - - -1.4 Add email address for a user -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. http:post:: /api/v1.1/users/:username/emails/ - - Add a new email address to the specified user's account. The email address - must be verified separately, a confirmation email is not automatically sent. - - :jsonparam string email: email address to be added. - - :reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token. - :reqheader Content-Type: MIME Type of post data. JSON, url-encoded form data, etc. - - :statuscode 201: success, new email added. - :statuscode 400: data validation error. - :statuscode 401: authentication error. - :statuscode 403: permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have ``email_write`` scope. - :statuscode 404: the specified username does not exist. - - **Example request**: - - .. sourcecode:: http - - POST /api/v1.1/users/janedoe/emails/ HTTP/1.1 - Host: www.docker.io - Accept: application/json - Content-Type: application/json - Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM - - { - "email": "jane.doe+other@example.com" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 Created - Content-Type: application/json - - { - "email": "jane.doe+other@example.com", - "verified": false, - "primary": false - } - - -1.5 Update an email address for a user -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. http:patch:: /api/v1.1/users/:username/emails/ - - Update an email address for the specified user to either verify an email - address or set it as the primary email for the user. You cannot use this - endpoint to un-verify an email address. You cannot use this endpoint to - unset the primary email, only set another as the primary. - - :param username: username of the user whose email info is being updated. - - :jsonparam string email: the email address to be updated. - :jsonparam boolean verified: (optional) whether the email address is verified, must be ``true`` or absent. - :jsonparam boolean primary: (optional) whether to set the email address as the primary email, must be ``true`` or absent. - - :reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token. - :reqheader Content-Type: MIME Type of post data. JSON, url-encoded form data, etc. - - :statuscode 200: success, user's email updated. - :statuscode 400: data validation error. - :statuscode 401: authentication error. - :statuscode 403: permission error, authenticated user must be the user whose data is being updated, OAuth access tokens must have ``email_write`` scope. - :statuscode 404: the specified username or email address does not exist. - - **Example request**: - - Once you have independently verified an email address. - - .. sourcecode:: http - - PATCH /api/v1.1/users/janedoe/emails/ HTTP/1.1 - Host: www.docker.io - Accept: application/json - Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ= - - { - "email": "jane.doe+other@example.com", - "verified": true, - } - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "email": "jane.doe+other@example.com", - "verified": true, - "primary": false - } - - -1.6 Delete email address for a user -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. http:delete:: /api/v1.1/users/:username/emails/ - - Delete an email address from the specified user's account. You cannot - delete a user's primary email address. - - :jsonparam string email: email address to be deleted. - - :reqheader Authorization: required authentication credentials of either type HTTP Basic or OAuth Bearer Token. - :reqheader Content-Type: MIME Type of post data. JSON, url-encoded form data, etc. - - :statuscode 204: success, email address removed. - :statuscode 400: validation error. - :statuscode 401: authentication error. - :statuscode 403: permission error, authenticated user must be the user whose data is being requested, OAuth access tokens must have ``email_write`` scope. - :statuscode 404: the specified username or email address does not exist. - - **Example request**: - - .. sourcecode:: http - - DELETE /api/v1.1/users/janedoe/emails/ HTTP/1.1 - Host: www.docker.io - Accept: application/json - Content-Type: application/json - Authorization: Bearer zAy0BxC1wDv2EuF3tGs4HrI6qJp6KoL7nM - - { - "email": "jane.doe+other@example.com" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 NO CONTENT - Content-Length: 0 diff --git a/docs/sources/reference/api/docker_io_oauth_api.rst b/docs/sources/reference/api/docker_io_oauth_api.rst deleted file mode 100644 index 24d2af3adb..0000000000 --- a/docs/sources/reference/api/docker_io_oauth_api.rst +++ /dev/null @@ -1,251 +0,0 @@ -:title: docker.io OAuth API -:description: API Documentation for docker.io's OAuth flow. -:keywords: API, Docker, oauth, REST, documentation - - -=================== -docker.io OAuth API -=================== - - -1. Brief introduction -===================== - -Some docker.io API requests will require an access token to authenticate. To -get an access token for a user, that user must first grant your application -access to their docker.io account. In order for them to grant your application -access you must first register your application. - -Before continuing, we encourage you to familiarize yourself with -`The OAuth 2.0 Authorization Framework `_. - -*Also note that all OAuth interactions must take place over https connections* - - -2. Register Your Application -============================ - -You will need to register your application with docker.io before users will -be able to grant your application access to their account information. We -are currently only allowing applications selectively. To request registration -of your application send an email to support-accounts@docker.com with the -following information: - -- The name of your application -- A description of your application and the service it will provide - to docker.io users. -- A callback URI that we will use for redirecting authorization requests to - your application. These are used in the step of getting an Authorization - Code. The domain name of the callback URI will be visible to the user when - they are requested to authorize your application. - -When your application is approved you will receive a response from the -docker.io team with your ``client_id`` and ``client_secret`` which your -application will use in the steps of getting an Authorization Code and getting -an Access Token. - - -3. 
Endpoints -============ - -3.1 Get an Authorization Code -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Once You have registered you are ready to start integrating docker.io accounts -into your application! The process is usually started by a user following a -link in your application to an OAuth Authorization endpoint. - -.. http:get:: /api/v1.1/o/authorize/ - - Request that a docker.io user authorize your application. If the user is - not already logged in, they will be prompted to login. The user is then - presented with a form to authorize your application for the requested - access scope. On submission, the user will be redirected to the specified - ``redirect_uri`` with an Authorization Code. - - :query client_id: The ``client_id`` given to your application at - registration. - :query response_type: MUST be set to ``code``. This specifies that you - would like an Authorization Code returned. - :query redirect_uri: The URI to redirect back to after the user has - authorized your application. If omitted, the first of your registered - ``response_uris`` is used. If included, it must be one of the URIs - which were submitted when registering your application. - :query scope: The extent of access permissions you are requesting. - Currently, the scope options are ``profile_read``, ``profile_write``, - ``email_read``, and ``email_write``. Scopes must be separated by a - space. If omitted, the default scopes ``profile_read email_read`` are - used. - :query state: (Recommended) Used by your application to maintain state - between the authorization request and callback to protect against CSRF - attacks. - - **Example Request** - - Asking the user for authorization. - - .. sourcecode:: http - - GET /api/v1.1/o/authorize/?client_id=TestClientID&response_type=code&redirect_uri=https%3A//my.app/auth_complete/&scope=profile_read%20email_read&state=abc123 HTTP/1.1 - Host: www.docker.io - - **Authorization Page** - - When the user follows a link, making the above GET request, they will be - asked to login to their docker.io account if they are not already and then - be presented with the following authorization prompt which asks the user - to authorize your application with a description of the requested scopes. - - .. image:: _static/io_oauth_authorization_page.png - - Once the user allows or denies your Authorization Request the user will be - redirected back to your application. Included in that request will be the - following query parameters: - - ``code`` - The Authorization code generated by the docker.io authorization server. - Present it again to request an Access Token. This code expires in 60 - seconds. - - ``state`` - If the ``state`` parameter was present in the authorization request this - will be the exact value received from that request. - - ``error`` - An error message in the event of the user denying the authorization or - some other kind of error with the request. - - -3.2 Get an Access Token -^^^^^^^^^^^^^^^^^^^^^^^ - -Once the user has authorized your application, a request will be made to your -application's specified ``redirect_uri`` which includes a ``code`` parameter -that you must then use to get an Access Token. - -.. http:post:: /api/v1.1/o/token/ - - Submit your newly granted Authorization Code and your application's - credentials to receive an Access Token and Refresh Token. The code is valid - for 60 seconds and cannot be used more than once. 
- - :reqheader Authorization: HTTP basic authentication using your - application's ``client_id`` and ``client_secret`` - - :form grant_type: MUST be set to ``authorization_code`` - :form code: The authorization code received from the user's redirect - request. - :form redirect_uri: The same ``redirect_uri`` used in the authentication - request. - - **Example Request** - - Using an authorization code to get an access token. - - .. sourcecode:: http - - POST /api/v1.1/o/token/ HTTP/1.1 - Host: www.docker.io - Authorization: Basic VGVzdENsaWVudElEOlRlc3RDbGllbnRTZWNyZXQ= - Accept: application/json - Content-Type: application/json - - { - "grant_type": "code", - "code": "YXV0aG9yaXphdGlvbl9jb2Rl", - "redirect_uri": "https://my.app/auth_complete/" - } - - **Example Response** - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json;charset=UTF-8 - - { - "username": "janedoe", - "user_id": 42, - "access_token": "t6k2BqgRw59hphQBsbBoPPWLqu6FmS", - "expires_in": 15552000, - "token_type": "Bearer", - "scope": "profile_read email_read", - "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc" - } - - In the case of an error, there will be a non-200 HTTP Status and and data - detailing the error. - - -3.3 Refresh a Token -^^^^^^^^^^^^^^^^^^^ - -Once the Access Token expires you can use your ``refresh_token`` to have -docker.io issue your application a new Access Token, if the user has not -revoked access from your application. - -.. http:post:: /api/v1.1/o/token/ - - Submit your ``refresh_token`` and application's credentials to receive a - new Access Token and Refresh Token. The ``refresh_token`` can be used - only once. - - :reqheader Authorization: HTTP basic authentication using your - application's ``client_id`` and ``client_secret`` - - :form grant_type: MUST be set to ``refresh_token`` - :form refresh_token: The ``refresh_token`` which was issued to your - application. - :form scope: (optional) The scope of the access token to be returned. - Must not include any scope not originally granted by the user and if - omitted is treated as equal to the scope originally granted. - - **Example Request** - - Refreshing an access token. - - .. sourcecode:: http - - POST /api/v1.1/o/token/ HTTP/1.1 - Host: www.docker.io - Authorization: Basic VGVzdENsaWVudElEOlRlc3RDbGllbnRTZWNyZXQ= - Accept: application/json - Content-Type: application/json - - { - "grant_type": "refresh_token", - "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc", - } - - **Example Response** - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json;charset=UTF-8 - - { - "username": "janedoe", - "user_id": 42, - "access_token": "t6k2BqgRw59hphQBsbBoPPWLqu6FmS", - "expires_in": 15552000, - "token_type": "Bearer", - "scope": "profile_read email_read", - "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc" - } - - In the case of an error, there will be a non-200 HTTP Status and and data - detailing the error. - - -4. Use an Access Token with the API -=================================== - -Many of the docker.io API requests will require a Authorization request header -field. Simply ensure you add this header with "Bearer <``access_token``>": - -.. 
sourcecode:: http - - GET /api/v1.1/resource HTTP/1.1 - Host: docker.io - Authorization: Bearer 2YotnFZFEjr1zCsicMWpAA diff --git a/docs/sources/reference/api/docker_remote_api.rst b/docs/sources/reference/api/docker_remote_api.rst deleted file mode 100644 index 1e90b1bbe3..0000000000 --- a/docs/sources/reference/api/docker_remote_api.rst +++ /dev/null @@ -1,404 +0,0 @@ -:title: Remote API -:description: API Documentation for Docker -:keywords: API, Docker, rcli, REST, documentation - -.. COMMENT use https://pythonhosted.org/sphinxcontrib-httpdomain/ to -.. document the REST API. - -================= -Docker Remote API -================= - - -1. Brief introduction -===================== - -- The Remote API is replacing rcli -- By default the Docker daemon listens on unix:///var/run/docker.sock and the client must have root access to interact with the daemon -- If a group named *docker* exists on your system, docker will apply ownership of the socket to the group -- The API tends to be REST, but for some complex commands, like attach - or pull, the HTTP connection is hijacked to transport stdout stdin - and stderr -- Since API version 1.2, the auth configuration is now handled client - side, so the client has to send the authConfig as POST in - /images/(name)/push -- authConfig, set as the ``X-Registry-Auth`` header, is currently a Base64 encoded (json) string with credentials: - ``{'username': string, 'password': string, 'email': string, 'serveraddress' : string}`` - -2. Versions -=========== - -The current version of the API is 1.11 - -Calling /images//insert is the same as calling -/v1.11/images//insert - -You can still call an old version of the api using -/v1.11/images//insert - - -v1.11 -***** - -Full Documentation ------------------- - -:doc:`docker_remote_api_v1.11` - -What's new ----------- - -.. http:get:: /events - - **New!** You can now use the ``-until`` parameter to close connection after timestamp. - -v1.10 -***** - -Full Documentation ------------------- - -:doc:`docker_remote_api_v1.10` - -What's new ----------- - -.. http:delete:: /images/(name) - - **New!** You can now use the force parameter to force delete of an image, even if it's - tagged in multiple repositories. - **New!** You can now use the noprune parameter to prevent the deletion of parent images - -.. http:delete:: /containers/(id) - - **New!** You can now use the force paramter to force delete a container, even if - it is currently running - -v1.9 -**** - -Full Documentation ------------------- - -:doc:`docker_remote_api_v1.9` - -What's new ----------- - -.. http:post:: /build - - **New!** This endpoint now takes a serialized ConfigFile which it uses to - resolve the proper registry auth credentials for pulling the base image. - Clients which previously implemented the version accepting an AuthConfig - object must be updated. - -v1.8 -**** - -Full Documentation ------------------- - -What's new ----------- - -.. http:post:: /build - - **New!** This endpoint now returns build status as json stream. In case - of a build error, it returns the exit status of the failed command. - -.. http:get:: /containers/(id)/json - - **New!** This endpoint now returns the host config for the container. - -.. http:post:: /images/create -.. http:post:: /images/(name)/insert -.. http:post:: /images/(name)/push - - **New!** progressDetail object was added in the JSON. It's now possible - to get the current value and the total of the progress without having to - parse the string. 
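To make the progressDetail note above concrete, here is a minimal, illustrative Go sketch of a client that decodes such a JSON pull stream (for example from ``POST /images/create``) and reports numeric progress instead of parsing the human-readable ``progress`` string. It is not part of the documented API surface: the ``pullProgress`` type, its field names, and the hard-coded TCP address are assumptions chosen to match the JSON examples shown in these documents, and the daemon would need to have been started with ``-H`` on a TCP port for the URL below to be reachable.

.. code-block:: go

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "net/http"
    )

    // pullProgress mirrors the stream messages shown in these docs, e.g.
    //   {"status":"Pulling", "progress":"1 B/ 100 B",
    //    "progressDetail":{"current":1, "total":100}}
    // The type name and the use of a plain http.Post are illustrative only.
    type pullProgress struct {
        Status         string `json:"status"`
        Progress       string `json:"progress"`
        ProgressDetail struct {
            Current int64 `json:"current"`
            Total   int64 `json:"total"`
        } `json:"progressDetail"`
        Error string `json:"error"`
    }

    func main() {
        // Hypothetical daemon address; requires `docker -d -H=127.0.0.1:4243`.
        resp, err := http.Post(
            "http://127.0.0.1:4243/v1.8/images/create?fromImage=base",
            "text/plain", nil)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        // The body is a stream of JSON objects, one per status update.
        dec := json.NewDecoder(resp.Body)
        for {
            var msg pullProgress
            if err := dec.Decode(&msg); err == io.EOF {
                break
            } else if err != nil {
                panic(err)
            }
            if msg.Error != "" {
                fmt.Println("error:", msg.Error)
                break
            }
            if msg.ProgressDetail.Total > 0 {
                fmt.Printf("%s: %d/%d bytes\n", msg.Status,
                    msg.ProgressDetail.Current, msg.ProgressDetail.Total)
            } else {
                fmt.Println(msg.Status)
            }
        }
    }

The same decoding loop applies to the ``/images/(name)/push`` and ``/images/(name)/insert`` streams, which gained the ``progressDetail`` object in the same API version.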
- -v1.7 -**** - -Full Documentation ------------------- - -What's new ----------- - -.. http:get:: /images/json - - The format of the json returned from this uri changed. Instead of an entry - for each repo/tag on an image, each image is only represented once, with a - nested attribute indicating the repo/tags that apply to that image. - - Instead of: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "VirtualSize": 131506275, - "Size": 131506275, - "Created": 1365714795, - "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", - "Tag": "12.04", - "Repository": "ubuntu" - }, - { - "VirtualSize": 131506275, - "Size": 131506275, - "Created": 1365714795, - "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", - "Tag": "latest", - "Repository": "ubuntu" - }, - { - "VirtualSize": 131506275, - "Size": 131506275, - "Created": 1365714795, - "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", - "Tag": "precise", - "Repository": "ubuntu" - }, - { - "VirtualSize": 180116135, - "Size": 24653, - "Created": 1364102658, - "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Tag": "12.10", - "Repository": "ubuntu" - }, - { - "VirtualSize": 180116135, - "Size": 24653, - "Created": 1364102658, - "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Tag": "quantal", - "Repository": "ubuntu" - } - ] - - The returned json looks like this: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "RepoTags": [ - "ubuntu:12.04", - "ubuntu:precise", - "ubuntu:latest" - ], - "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", - "Created": 1365714795, - "Size": 131506275, - "VirtualSize": 131506275 - }, - { - "RepoTags": [ - "ubuntu:12.10", - "ubuntu:quantal" - ], - "ParentId": "27cf784147099545", - "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Created": 1364102658, - "Size": 24653, - "VirtualSize": 180116135 - } - ] - -.. http:get:: /images/viz - - This URI no longer exists. The ``images --viz`` output is now generated in - the client, using the ``/images/json`` data. - -v1.6 -**** - -Full Documentation ------------------- - -What's new ----------- - -.. http:post:: /containers/(id)/attach - - **New!** You can now split stderr from stdout. This is done by prefixing - a header to each transmition. See :http:post:`/containers/(id)/attach`. - The WebSocket attach is unchanged. - Note that attach calls on the previous API version didn't change. Stdout and - stderr are merged. - - -v1.5 -**** - -Full Documentation ------------------- - -What's new ----------- - -.. http:post:: /images/create - - **New!** You can now pass registry credentials (via an AuthConfig object) - through the `X-Registry-Auth` header - -.. http:post:: /images/(name)/push - - **New!** The AuthConfig object now needs to be passed through - the `X-Registry-Auth` header - -.. http:get:: /containers/json - - **New!** The format of the `Ports` entry has been changed to a list of - dicts each containing `PublicPort`, `PrivatePort` and `Type` describing a - port mapping. - -v1.4 -**** - -Full Documentation ------------------- - -What's new ----------- - -.. http:post:: /images/create - - **New!** When pulling a repo, all images are now downloaded in parallel. - -.. http:get:: /containers/(id)/top - - **New!** You can now use ps args with docker top, like `docker top aux` - -.. 
http:get:: /events: - - **New!** Image's name added in the events - -v1.3 -**** - -docker v0.5.0 51f6c4a_ - -Full Documentation ------------------- - -What's new ----------- - -.. http:get:: /containers/(id)/top - - List the processes running inside a container. - -.. http:get:: /events: - - **New!** Monitor docker's events via streaming or via polling - -Builder (/build): - -- Simplify the upload of the build context -- Simply stream a tarball instead of multipart upload with 4 - intermediary buffers -- Simpler, less memory usage, less disk usage and faster - -.. Warning:: - - The /build improvements are not reverse-compatible. Pre 1.3 clients - will break on /build. - -List containers (/containers/json): - -- You can use size=1 to get the size of the containers - -Start containers (/containers//start): - -- You can now pass host-specific configuration (e.g. bind mounts) in - the POST body for start calls - -v1.2 -**** - -docker v0.4.2 2e7649b_ - -Full Documentation ------------------- - -What's new ----------- - -The auth configuration is now handled by the client. - -The client should send it's authConfig as POST on each call of -/images/(name)/push - -.. http:get:: /auth - - **Deprecated.** - -.. http:post:: /auth - - Only checks the configuration but doesn't store it on the server - - Deleting an image is now improved, will only untag the image if it - has children and remove all the untagged parents if has any. - -.. http:post:: /images//delete - - Now returns a JSON structure with the list of images - deleted/untagged. - - -v1.1 -**** - -docker v0.4.0 a8ae398_ - -Full Documentation ------------------- - -What's new ----------- - -.. http:post:: /images/create -.. http:post:: /images/(name)/insert -.. http:post:: /images/(name)/push - - Uses json stream instead of HTML hijack, it looks like this: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? (n/a)"} - {"error":"Invalid..."} - ... - -v1.0 -**** - -docker v0.3.4 8d73740_ - -Full Documentation ------------------- - -What's new ----------- - -Initial version - - -.. _a8ae398: https://github.com/dotcloud/docker/commit/a8ae398bf52e97148ee7bd0d5868de2e15bd297f -.. _8d73740: https://github.com/dotcloud/docker/commit/8d73740343778651c09160cde9661f5f387b36f4 -.. _2e7649b: https://github.com/dotcloud/docker/commit/2e7649beda7c820793bd46766cbc2cfeace7b168 -.. _51f6c4a: https://github.com/dotcloud/docker/commit/51f6c4a7372450d164c61e0054daf0223ddbd909 diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.rst b/docs/sources/reference/api/docker_remote_api_v1.10.rst deleted file mode 100644 index 8635ec4826..0000000000 --- a/docs/sources/reference/api/docker_remote_api_v1.10.rst +++ /dev/null @@ -1,1280 +0,0 @@ -:title: Remote API v1.10 -:description: API Documentation for Docker -:keywords: API, Docker, rcli, REST, documentation - -:orphan: - -======================= -Docker Remote API v1.10 -======================= - -1. Brief introduction -===================== - -- The Remote API has replaced rcli -- The daemon listens on ``unix:///var/run/docker.sock``, but you can - :ref:`bind_docker`. -- The API tends to be REST, but for some complex commands, like - ``attach`` or ``pull``, the HTTP connection is hijacked to transport - ``stdout, stdin`` and ``stderr`` - -2. Endpoints -============ - -2.1 Containers --------------- - -List containers -*************** - -.. http:get:: /containers/json - - List containers - - **Example request**: - - .. 
sourcecode:: http - - GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id": "8dfafdbc3a40", - "Image": "base:latest", - "Command": "echo 1", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "9cd87474be90", - "Image": "base:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "3176a2479c92", - "Image": "base:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "4cb07b47f9fb", - "Image": "base:latest", - "Command": "echo 444444444444444444444444444444444", - "Created": 1367854152, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - } - ] - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :query limit: Show ``limit`` last created containers, include non-running ones. - :query since: Show only containers created since Id, include non-running ones. - :query before: Show only containers created before Id, include non-running ones. - :query size: 1/True/true or 0/False/false, Show the containers sizes - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create a container -****************** - -.. http:post:: /containers/create - - Create a container - - **Example request**: - - .. sourcecode:: http - - POST /containers/create HTTP/1.1 - Content-Type: application/json - - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "date" - ], - "Image":"base", - "Volumes":{ - "/tmp": {} - }, - "WorkingDir":"", - "DisableNetwork": false, - "ExposedPorts":{ - "22/tcp": {} - } - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/json - - { - "Id":"e90e34656806" - "Warnings":[] - } - - :jsonparam config: the container's configuration - :query name: Assign the specified name to the container. Must match ``/?[a-zA-Z0-9_-]+``. - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 406: impossible to attach (container not running) - :statuscode 500: server error - - -Inspect a container -******************* - -.. http:get:: /containers/(id)/json - - Return low-level information on the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/json HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.041847+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Image": "base", - "Volumes": {}, - "WorkingDir":"" - - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:01360", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {}, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49153" - } - ] - }, - "Links": null, - "PublishAllPorts": false - } - } - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -List processes running inside a container -***************************************** - -.. http:get:: /containers/(id)/top - - List processes running inside the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/top HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Titles":[ - "USER", - "PID", - "%CPU", - "%MEM", - "VSZ", - "RSS", - "TTY", - "STAT", - "START", - "TIME", - "COMMAND" - ], - "Processes":[ - ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], - ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] - ] - } - - :query ps_args: ps arguments to use (eg. aux) - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Inspect changes on a container's filesystem -******************************************* - -.. http:get:: /containers/(id)/changes - - Inspect changes on container ``id`` 's filesystem - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/changes HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } - ] - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Export a container -****************** - -.. http:get:: /containers/(id)/export - - Export the contents of container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/export HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Start a container -***************** - -.. http:post:: /containers/(id)/start - - Start the container ``id`` - - **Example request**: - - .. 
sourcecode:: http - - POST /containers/(id)/start HTTP/1.1 - Content-Type: application/json - - { - "Binds":["/tmp:/tmp"], - "LxcConf":{"lxc.utsname":"docker"}, - "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, - "PublishAllPorts":false, - "Privileged":false - "Dns": ["8.8.8.8"], - "VolumesFrom: ["parent", "other:ro"] - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 No Content - Content-Type: text/plain - - :jsonparam hostConfig: the container's host configuration (optional) - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Stop a container -**************** - -.. http:post:: /containers/(id)/stop - - Stop the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/stop?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Restart a container -******************* - -.. http:post:: /containers/(id)/restart - - Restart the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/restart?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Kill a container -**************** - -.. http:post:: /containers/(id)/kill - - Kill the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/kill HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query signal: Signal to send to the container, must be integer or string (i.e. SIGINT). When not set, SIGKILL is assumed and the call will waits for the container to exit. - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Attach to a container -********************* - -.. http:post:: /containers/(id)/attach - - Attach to the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query logs: 1/True/true or 0/False/false, return logs. Default false - :query stream: 1/True/true or 0/False/false, return stream. Default false - :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - **Stream details**: - - When using the TTY setting is enabled in - :http:post:`/containers/create`, the stream is the raw data - from the process PTY and client's stdin. When the TTY is - disabled, then the stream is multiplexed to separate stdout - and stderr. - - The format is a **Header** and a **Payload** (frame). - - **HEADER** - - The header will contain the information on which stream write - the stream (stdout or stderr). 
It also contain the size of - the associated frame encoded on the last 4 bytes (uint32). - - It is encoded on the first 8 bytes like this:: - - header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} - - ``STREAM_TYPE`` can be: - - - 0: stdin (will be writen on stdout) - - 1: stdout - - 2: stderr - - ``SIZE1, SIZE2, SIZE3, SIZE4`` are the 4 bytes of the uint32 size encoded as big endian. - - **PAYLOAD** - - The payload is the raw stream. - - **IMPLEMENTATION** - - The simplest way to implement the Attach protocol is the following: - - 1) Read 8 bytes - 2) chose stdout or stderr depending on the first byte - 3) Extract the frame size from the last 4 byets - 4) Read the extracted size and output it on the correct output - 5) Goto 1) - - - -Wait a container -**************** - -.. http:post:: /containers/(id)/wait - - Block until container ``id`` stops, then returns the exit code - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/wait HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"StatusCode":0} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Remove a container -******************* - -.. http:delete:: /containers/(id) - - Remove the container ``id`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /containers/16253994b7c4?v=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - :query force: 1/True/true or 0/False/false, Removes the container even if it was running. Default false - :statuscode 204: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -Copy files or folders from a container -************************************** - -.. http:post:: /containers/(id)/copy - - Copy files or folders of container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/4fa6e0f0c678/copy HTTP/1.1 - Content-Type: application/json - - { - "Resource":"test.txt" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -2.2 Images ----------- - -List Images -*********** - -.. http:get:: /images/json - - **Example request**: - - .. sourcecode:: http - - GET /images/json?all=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "RepoTags": [ - "ubuntu:12.04", - "ubuntu:precise", - "ubuntu:latest" - ], - "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", - "Created": 1365714795, - "Size": 131506275, - "VirtualSize": 131506275 - }, - { - "RepoTags": [ - "ubuntu:12.10", - "ubuntu:quantal" - ], - "ParentId": "27cf784147099545", - "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Created": 1364102658, - "Size": 24653, - "VirtualSize": 180116135 - } - ] - - -Create an image -*************** - -.. http:post:: /images/create - - Create an image, either by pull it from the registry or by importing it - - **Example request**: - - .. sourcecode:: http - - POST /images/create?fromImage=base HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} - {"error":"Invalid..."} - ... - - When using this endpoint to pull an image from the registry, - the ``X-Registry-Auth`` header can be used to include a - base64-encoded AuthConfig object. - - :query fromImage: name of the image to pull - :query fromSrc: source to import, - means stdin - :query repo: repository - :query tag: tag - :query registry: the registry to pull from - :reqheader X-Registry-Auth: base64-encoded AuthConfig object - :statuscode 200: no error - :statuscode 500: server error - - - -Insert a file in an image -************************* - -.. http:post:: /images/(name)/insert - - Insert a file from ``url`` in the image ``name`` at ``path`` - - **Example request**: - - .. sourcecode:: http - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}} - {"error":"Invalid..."} - ... - - :statuscode 200: no error - :statuscode 500: server error - - -Inspect an image -**************** - -.. http:get:: /images/(name)/json - - Return low-level information on the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/base/json HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "parent":"27cf784147099545", - "created":"2013-03-23T22:24:18.818426-07:00", - "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "container_config": - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":false, - "AttachStderr":false, - "PortSpecs":null, - "Tty":true, - "OpenStdin":true, - "StdinOnce":false, - "Env":null, - "Cmd": ["/bin/bash"] - "Image":"base", - "Volumes":null, - "WorkingDir":"" - }, - "Size": 6824592 - } - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Get the history of an image -*************************** - -.. http:get:: /images/(name)/history - - Return the history of the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/base/history HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id":"b750fe79269d", - "Created":1364102658, - "CreatedBy":"/bin/bash" - }, - { - "Id":"27cf78414709", - "Created":1364068391, - "CreatedBy":"" - } - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Push an image on the registry -***************************** - -.. http:post:: /images/(name)/push - - Push the image ``name`` on the registry - - **Example request**: - - .. sourcecode:: http - - POST /images/test/push HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}} - {"error":"Invalid..."} - ... - - :query registry: the registry you wan to push, optional - :reqheader X-Registry-Auth: include a base64-encoded AuthConfig object. 
- :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Tag an image into a repository -****************************** - -.. http:post:: /images/(name)/tag - - Tag the image ``name`` into a repository - - **Example request**: - - .. sourcecode:: http - - POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - - :query repo: The repository to tag in - :query force: 1/True/true or 0/False/false, default false - :statuscode 201: no error - :statuscode 400: bad parameter - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Remove an image -*************** - -.. http:delete:: /images/(name) - - Remove the image ``name`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /images/test HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-type: application/json - - [ - {"Untagged":"3e2f21a89f"}, - {"Deleted":"3e2f21a89f"}, - {"Deleted":"53b4f83ac9"} - ] - - :query force: 1/True/true or 0/False/false, default false - :query noprune: 1/True/true or 0/False/false, default false - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Search images -************* - -.. http:get:: /images/search - - Search for an image in the docker index. - - .. note:: - - The response keys have changed from API v1.6 to reflect the JSON - sent by the registry server to the docker daemon's request. - - **Example request**: - - .. sourcecode:: http - - GET /images/search?term=sshd HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "wma55/u1210sshd", - "star_count": 0 - }, - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "jdswinbank/sshd", - "star_count": 0 - }, - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "vgauthier/sshd", - "star_count": 0 - } - ... - ] - - :query term: term to search - :statuscode 200: no error - :statuscode 500: server error - - -2.3 Misc --------- - -Build an image from Dockerfile via stdin -**************************************** - -.. http:post:: /build - - Build an image from Dockerfile via stdin - - **Example request**: - - .. sourcecode:: http - - POST /build HTTP/1.1 - - {{ STREAM }} - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"stream":"Step 1..."} - {"stream":"..."} - {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}} - - - The stream must be a tar archive compressed with one of the - following algorithms: identity (no compression), gzip, bzip2, - xz. - - The archive must include a file called ``Dockerfile`` at its - root. It may include any number of other files, which will be - accessible in the build context (See the :ref:`ADD build command - `). - - :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success - :query q: suppress verbose build output - :query nocache: do not use the cache when building the image - :reqheader Content-type: should be set to ``"application/tar"``. 
- :reqheader X-Registry-Config: base64-encoded ConfigFile object - :statuscode 200: no error - :statuscode 500: server error - - - -Check auth configuration -************************ - -.. http:post:: /auth - - Get the default username and email - - **Example request**: - - .. sourcecode:: http - - POST /auth HTTP/1.1 - Content-Type: application/json - - { - "username":"hannibal", - "password:"xxxx", - "email":"hannibal@a-team.com", - "serveraddress":"https://index.docker.io/v1/" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 204: no error - :statuscode 500: server error - - -Display system-wide information -******************************* - -.. http:get:: /info - - Display system-wide information - - **Example request**: - - .. sourcecode:: http - - GET /info HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Containers":11, - "Images":16, - "Debug":false, - "NFd": 11, - "NGoroutines":21, - "MemoryLimit":true, - "SwapLimit":false, - "IPv4Forwarding":true - } - - :statuscode 200: no error - :statuscode 500: server error - - -Show the docker version information -*********************************** - -.. http:get:: /version - - Show the docker version information - - **Example request**: - - .. sourcecode:: http - - GET /version HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Create a new image from a container's changes -********************************************* - -.. http:post:: /commit - - Create a new image from a container's changes - - **Example request**: - - .. sourcecode:: http - - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/vnd.docker.raw-stream - - {"Id":"596069db4bf5"} - - :query container: source container - :query repo: repository - :query tag: tag - :query m: commit message - :query author: author (eg. "John Hannibal Smith ") - :query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Monitor Docker's events -*********************** - -.. http:get:: /events - - Get events from docker, either in real time via streaming, or via polling (using `since`) - - **Example request**: - - .. sourcecode:: http - - GET /events?since=1374067924 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} - {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} - {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} - {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} - - :query since: timestamp used for polling - :statuscode 200: no error - :statuscode 500: server error - -Get a tarball containing all images and tags in a repository -************************************************************ - -.. http:get:: /images/(name)/get - - Get a tarball containing all images and metadata for the repository specified by ``name``. - - **Example request** - - .. 
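The ``/build`` endpoint described above expects the request body to be a tar archive with a ``Dockerfile`` at its root and ``Content-Type`` set to ``application/tar``. A minimal Go sketch of assembling such a context in memory and posting it follows; the daemon address and the ``t=myrepo`` tag are illustrative placeholders, not values taken from this patch.

.. code-block:: go

    // Build an in-memory tar context containing only a Dockerfile and
    // POST it to /build, as the docs above require. Address and tag are
    // illustrative placeholders.
    package main

    import (
        "archive/tar"
        "bytes"
        "fmt"
        "net/http"
    )

    func main() {
        dockerfile := []byte("FROM base\nCMD [\"date\"]\n")

        var buf bytes.Buffer
        tw := tar.NewWriter(&buf)
        if err := tw.WriteHeader(&tar.Header{
            Name: "Dockerfile",
            Mode: 0644,
            Size: int64(len(dockerfile)),
        }); err != nil {
            panic(err)
        }
        if _, err := tw.Write(dockerfile); err != nil {
            panic(err)
        }
        tw.Close()

        resp, err := http.Post("http://127.0.0.1:4243/build?t=myrepo", "application/tar", &buf)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }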
sourcecode:: http - - GET /images/ubuntu/get - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/x-tar - - Binary data stream - - :statuscode 200: no error - :statuscode 500: server error - -Load a tarball with a set of images and tags into docker -******************************************************** - -.. http:post:: /images/load - - Load a set of images and tags into the docker repository. - - **Example request** - - .. sourcecode:: http - - POST /images/load - - Tarball in body - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 500: server error - -3. Going further -================ - -3.1 Inside 'docker run' ------------------------ - -Here are the steps of 'docker run' : - -* Create the container -* If the status code is 404, it means the image doesn't exists: - * Try to pull it - * Then retry to create the container -* Start the container -* If you are not in detached mode: - * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 -* If in detached mode or only stdin is attached: - * Display the container's id - - -3.2 Hijacking -------------- - -In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. - -3.3 CORS Requests ------------------ - -To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. - -.. code-block:: bash - - docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.rst b/docs/sources/reference/api/docker_remote_api_v1.11.rst deleted file mode 100644 index d66b4b1410..0000000000 --- a/docs/sources/reference/api/docker_remote_api_v1.11.rst +++ /dev/null @@ -1,1285 +0,0 @@ -:title: Remote API v1.11 -:description: API Documentation for Docker -:keywords: API, Docker, rcli, REST, documentation - -:orphan: - -======================= -Docker Remote API v1.11 -======================= - -1. Brief introduction -===================== - -- The Remote API has replaced rcli -- The daemon listens on ``unix:///var/run/docker.sock``, but you can - :ref:`bind_docker`. -- The API tends to be REST, but for some complex commands, like - ``attach`` or ``pull``, the HTTP connection is hijacked to transport - ``stdout, stdin`` and ``stderr`` - -2. Endpoints -============ - -2.1 Containers --------------- - -List containers -*************** - -.. http:get:: /containers/json - - List containers - - **Example request**: - - .. sourcecode:: http - - GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 - - **Example response**: - - .. 
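Section 3.1 of the deleted docs above lists the steps behind ``docker run``: create the container, pull the image and retry on a 404, then start it. A rough Go sketch of that create/pull/retry/start sequence against the remote API follows; the daemon address, image name, and JSON bodies are simplified placeholders.

.. code-block:: go

    // Approximate the 'docker run' flow from section 3.1 above:
    // create -> on 404 pull the image and retry -> start.
    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "io"
        "net/http"
    )

    const daemon = "http://127.0.0.1:4243" // placeholder address

    // create posts a minimal container config; the JSON body is a
    // simplified placeholder, not a full Config object.
    func create() (*http.Response, error) {
        body := []byte(`{"Image":"base","Cmd":["date"]}`)
        return http.Post(daemon+"/containers/create", "application/json", bytes.NewReader(body))
    }

    func main() {
        resp, err := create()
        if err != nil {
            panic(err)
        }
        if resp.StatusCode == http.StatusNotFound {
            // Image missing: pull it, drain the progress stream, retry.
            resp.Body.Close()
            pull, err := http.Post(daemon+"/images/create?fromImage=base", "text/plain", nil)
            if err != nil {
                panic(err)
            }
            io.Copy(io.Discard, pull.Body)
            pull.Body.Close()
            if resp, err = create(); err != nil {
                panic(err)
            }
        }
        var created struct{ Id string }
        if err := json.NewDecoder(resp.Body).Decode(&created); err != nil {
            panic(err)
        }
        resp.Body.Close()

        start, err := http.Post(daemon+"/containers/"+created.Id+"/start", "application/json", bytes.NewReader([]byte(`{}`)))
        if err != nil {
            panic(err)
        }
        start.Body.Close()
        fmt.Println(created.Id) // detached mode: print the container id
    }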
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id": "8dfafdbc3a40", - "Image": "base:latest", - "Command": "echo 1", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "9cd87474be90", - "Image": "base:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "3176a2479c92", - "Image": "base:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "4cb07b47f9fb", - "Image": "base:latest", - "Command": "echo 444444444444444444444444444444444", - "Created": 1367854152, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - } - ] - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :query limit: Show ``limit`` last created containers, include non-running ones. - :query since: Show only containers created since Id, include non-running ones. - :query before: Show only containers created before Id, include non-running ones. - :query size: 1/True/true or 0/False/false, Show the containers sizes - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create a container -****************** - -.. http:post:: /containers/create - - Create a container - - **Example request**: - - .. sourcecode:: http - - POST /containers/create HTTP/1.1 - Content-Type: application/json - - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "date" - ], - "Dns":null, - "Image":"base", - "Volumes":{ - "/tmp": {} - }, - "VolumesFrom":"", - "WorkingDir":"", - "DisableNetwork": false, - "ExposedPorts":{ - "22/tcp": {} - } - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/json - - { - "Id":"e90e34656806" - "Warnings":[] - } - - :jsonparam config: the container's configuration - :query name: Assign the specified name to the container. Must match ``/?[a-zA-Z0-9_-]+``. - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 406: impossible to attach (container not running) - :statuscode 500: server error - - -Inspect a container -******************* - -.. http:get:: /containers/(id)/json - - Return low-level information on the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/json HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.041847+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Dns": null, - "Image": "base", - "Volumes": {}, - "VolumesFrom": "", - "WorkingDir":"" - - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:01360", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {}, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49153" - } - ] - }, - "Links": null, - "PublishAllPorts": false - } - } - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -List processes running inside a container -***************************************** - -.. http:get:: /containers/(id)/top - - List processes running inside the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/top HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Titles":[ - "USER", - "PID", - "%CPU", - "%MEM", - "VSZ", - "RSS", - "TTY", - "STAT", - "START", - "TIME", - "COMMAND" - ], - "Processes":[ - ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], - ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] - ] - } - - :query ps_args: ps arguments to use (eg. aux) - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Inspect changes on a container's filesystem -******************************************* - -.. http:get:: /containers/(id)/changes - - Inspect changes on container ``id`` 's filesystem - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/changes HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } - ] - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Export a container -****************** - -.. http:get:: /containers/(id)/export - - Export the contents of container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/export HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Start a container -***************** - -.. http:post:: /containers/(id)/start - - Start the container ``id`` - - **Example request**: - - .. 
sourcecode:: http - - POST /containers/(id)/start HTTP/1.1 - Content-Type: application/json - - { - "Binds":["/tmp:/tmp"], - "LxcConf":{"lxc.utsname":"docker"}, - "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, - "PublishAllPorts":false, - "Privileged":false - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 No Content - Content-Type: text/plain - - :jsonparam hostConfig: the container's host configuration (optional) - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Stop a container -**************** - -.. http:post:: /containers/(id)/stop - - Stop the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/stop?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Restart a container -******************* - -.. http:post:: /containers/(id)/restart - - Restart the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/restart?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Kill a container -**************** - -.. http:post:: /containers/(id)/kill - - Kill the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/kill HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query signal: Signal to send to the container, must be integer or string (i.e. SIGINT). When not set, SIGKILL is assumed and the call will waits for the container to exit. - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Attach to a container -********************* - -.. http:post:: /containers/(id)/attach - - Attach to the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query logs: 1/True/true or 0/False/false, return logs. Default false - :query stream: 1/True/true or 0/False/false, return stream. Default false - :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. Default false - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - **Stream details**: - - When using the TTY setting is enabled in - :http:post:`/containers/create`, the stream is the raw data - from the process PTY and client's stdin. When the TTY is - disabled, then the stream is multiplexed to separate stdout - and stderr. - - The format is a **Header** and a **Payload** (frame). - - **HEADER** - - The header will contain the information on which stream write - the stream (stdout or stderr). 
It also contain the size of - the associated frame encoded on the last 4 bytes (uint32). - - It is encoded on the first 8 bytes like this:: - - header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} - - ``STREAM_TYPE`` can be: - - - 0: stdin (will be writen on stdout) - - 1: stdout - - 2: stderr - - ``SIZE1, SIZE2, SIZE3, SIZE4`` are the 4 bytes of the uint32 size encoded as big endian. - - **PAYLOAD** - - The payload is the raw stream. - - **IMPLEMENTATION** - - The simplest way to implement the Attach protocol is the following: - - 1) Read 8 bytes - 2) chose stdout or stderr depending on the first byte - 3) Extract the frame size from the last 4 byets - 4) Read the extracted size and output it on the correct output - 5) Goto 1) - - - -Wait a container -**************** - -.. http:post:: /containers/(id)/wait - - Block until container ``id`` stops, then returns the exit code - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/wait HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"StatusCode":0} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Remove a container -******************* - -.. http:delete:: /containers/(id) - - Remove the container ``id`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /containers/16253994b7c4?v=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - :query force: 1/True/true or 0/False/false, Removes the container even if it was running. Default false - :statuscode 204: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -Copy files or folders from a container -************************************** - -.. http:post:: /containers/(id)/copy - - Copy files or folders of container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/4fa6e0f0c678/copy HTTP/1.1 - Content-Type: application/json - - { - "Resource":"test.txt" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -2.2 Images ----------- - -List Images -*********** - -.. http:get:: /images/json - - **Example request**: - - .. sourcecode:: http - - GET /images/json?all=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "RepoTags": [ - "ubuntu:12.04", - "ubuntu:precise", - "ubuntu:latest" - ], - "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", - "Created": 1365714795, - "Size": 131506275, - "VirtualSize": 131506275 - }, - { - "RepoTags": [ - "ubuntu:12.10", - "ubuntu:quantal" - ], - "ParentId": "27cf784147099545", - "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Created": 1364102658, - "Size": 24653, - "VirtualSize": 180116135 - } - ] - - -Create an image -*************** - -.. http:post:: /images/create - - Create an image, either by pull it from the registry or by importing it - - **Example request**: - - .. sourcecode:: http - - POST /images/create?fromImage=base HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} - {"error":"Invalid..."} - ... - - When using this endpoint to pull an image from the registry, - the ``X-Registry-Auth`` header can be used to include a - base64-encoded AuthConfig object. - - :query fromImage: name of the image to pull - :query fromSrc: source to import, - means stdin - :query repo: repository - :query tag: tag - :query registry: the registry to pull from - :reqheader X-Registry-Auth: base64-encoded AuthConfig object - :statuscode 200: no error - :statuscode 500: server error - - - -Insert a file in an image -************************* - -.. http:post:: /images/(name)/insert - - Insert a file from ``url`` in the image ``name`` at ``path`` - - **Example request**: - - .. sourcecode:: http - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}} - {"error":"Invalid..."} - ... - - :statuscode 200: no error - :statuscode 500: server error - - -Inspect an image -**************** - -.. http:get:: /images/(name)/json - - Return low-level information on the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/base/json HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "parent":"27cf784147099545", - "created":"2013-03-23T22:24:18.818426-07:00", - "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "container_config": - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":false, - "AttachStderr":false, - "PortSpecs":null, - "Tty":true, - "OpenStdin":true, - "StdinOnce":false, - "Env":null, - "Cmd": ["/bin/bash"] - ,"Dns":null, - "Image":"base", - "Volumes":null, - "VolumesFrom":"", - "WorkingDir":"" - }, - "Size": 6824592 - } - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Get the history of an image -*************************** - -.. http:get:: /images/(name)/history - - Return the history of the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/base/history HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id":"b750fe79269d", - "Created":1364102658, - "CreatedBy":"/bin/bash" - }, - { - "Id":"27cf78414709", - "Created":1364068391, - "CreatedBy":"" - } - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Push an image on the registry -***************************** - -.. http:post:: /images/(name)/push - - Push the image ``name`` on the registry - - **Example request**: - - .. sourcecode:: http - - POST /images/test/push HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}} - {"error":"Invalid..."} - ... - - :query registry: the registry you wan to push, optional - :reqheader X-Registry-Auth: include a base64-encoded AuthConfig object. 
- :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Tag an image into a repository -****************************** - -.. http:post:: /images/(name)/tag - - Tag the image ``name`` into a repository - - **Example request**: - - .. sourcecode:: http - - POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - - :query repo: The repository to tag in - :query force: 1/True/true or 0/False/false, default false - :statuscode 201: no error - :statuscode 400: bad parameter - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Remove an image -*************** - -.. http:delete:: /images/(name) - - Remove the image ``name`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /images/test HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-type: application/json - - [ - {"Untagged":"3e2f21a89f"}, - {"Deleted":"3e2f21a89f"}, - {"Deleted":"53b4f83ac9"} - ] - - :query force: 1/True/true or 0/False/false, default false - :query noprune: 1/True/true or 0/False/false, default false - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Search images -************* - -.. http:get:: /images/search - - Search for an image in the docker index. - - .. note:: - - The response keys have changed from API v1.6 to reflect the JSON - sent by the registry server to the docker daemon's request. - - **Example request**: - - .. sourcecode:: http - - GET /images/search?term=sshd HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "wma55/u1210sshd", - "star_count": 0 - }, - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "jdswinbank/sshd", - "star_count": 0 - }, - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "vgauthier/sshd", - "star_count": 0 - } - ... - ] - - :query term: term to search - :statuscode 200: no error - :statuscode 500: server error - - -2.3 Misc --------- - -Build an image from Dockerfile via stdin -**************************************** - -.. http:post:: /build - - Build an image from Dockerfile via stdin - - **Example request**: - - .. sourcecode:: http - - POST /build HTTP/1.1 - - {{ STREAM }} - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"stream":"Step 1..."} - {"stream":"..."} - {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}} - - - The stream must be a tar archive compressed with one of the - following algorithms: identity (no compression), gzip, bzip2, - xz. - - The archive must include a file called ``Dockerfile`` at its - root. It may include any number of other files, which will be - accessible in the build context (See the :ref:`ADD build command - `). - - :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success - :query q: suppress verbose build output - :query nocache: do not use the cache when building the image - :reqheader Content-type: should be set to ``"application/tar"``. 
- :reqheader X-Registry-Config: base64-encoded ConfigFile object - :statuscode 200: no error - :statuscode 500: server error - - - -Check auth configuration -************************ - -.. http:post:: /auth - - Get the default username and email - - **Example request**: - - .. sourcecode:: http - - POST /auth HTTP/1.1 - Content-Type: application/json - - { - "username":"hannibal", - "password:"xxxx", - "email":"hannibal@a-team.com", - "serveraddress":"https://index.docker.io/v1/" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 204: no error - :statuscode 500: server error - - -Display system-wide information -******************************* - -.. http:get:: /info - - Display system-wide information - - **Example request**: - - .. sourcecode:: http - - GET /info HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Containers":11, - "Images":16, - "Debug":false, - "NFd": 11, - "NGoroutines":21, - "MemoryLimit":true, - "SwapLimit":false, - "IPv4Forwarding":true - } - - :statuscode 200: no error - :statuscode 500: server error - - -Show the docker version information -*********************************** - -.. http:get:: /version - - Show the docker version information - - **Example request**: - - .. sourcecode:: http - - GET /version HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Create a new image from a container's changes -********************************************* - -.. http:post:: /commit - - Create a new image from a container's changes - - **Example request**: - - .. sourcecode:: http - - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/vnd.docker.raw-stream - - {"Id":"596069db4bf5"} - - :query container: source container - :query repo: repository - :query tag: tag - :query m: commit message - :query author: author (eg. "John Hannibal Smith ") - :query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Monitor Docker's events -*********************** - -.. http:get:: /events - - Get events from docker, either in real time via streaming, or via polling (using `since`) - - **Example request**: - - .. sourcecode:: http - - GET /events?since=1374067924 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} - {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} - {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} - {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} - - :query since: timestamp used for polling - :query until: timestamp used for polling - :statuscode 200: no error - :statuscode 500: server error - -Get a tarball containing all images and tags in a repository -************************************************************ - -.. 
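The ``/events`` endpoint above returns a stream of newline-separated JSON messages and can be polled with ``since`` (and, in the v1.11 docs, ``until``). A small Go sketch of decoding that stream follows; the field names come from the example response and the daemon address is a placeholder.

.. code-block:: go

    // Poll /events?since=<timestamp> and decode the stream of JSON
    // messages shown in the example response above.
    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    type event struct {
        Status string `json:"status"`
        ID     string `json:"id"`
        From   string `json:"from"`
        Time   int64  `json:"time"`
    }

    func main() {
        resp, err := http.Get("http://127.0.0.1:4243/events?since=1374067924")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        dec := json.NewDecoder(resp.Body)
        for {
            var e event
            if err := dec.Decode(&e); err != nil {
                return // io.EOF when the stream ends
            }
            fmt.Printf("%d %s %s (%s)\n", e.Time, e.Status, e.ID, e.From)
        }
    }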
http:get:: /images/(name)/get - - Get a tarball containing all images and metadata for the repository specified by ``name``. - - **Example request** - - .. sourcecode:: http - - GET /images/ubuntu/get - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/x-tar - - Binary data stream - - :statuscode 200: no error - :statuscode 500: server error - -Load a tarball with a set of images and tags into docker -******************************************************** - -.. http:post:: /images/load - - Load a set of images and tags into the docker repository. - - **Example request** - - .. sourcecode:: http - - POST /images/load - - Tarball in body - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 500: server error - -3. Going further -================ - -3.1 Inside 'docker run' ------------------------ - -Here are the steps of 'docker run' : - -* Create the container -* If the status code is 404, it means the image doesn't exists: - * Try to pull it - * Then retry to create the container -* Start the container -* If you are not in detached mode: - * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 -* If in detached mode or only stdin is attached: - * Display the container's id - - -3.2 Hijacking -------------- - -In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. - -3.3 CORS Requests ------------------ - -To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. - -.. code-block:: bash - - docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.rst b/docs/sources/reference/api/docker_remote_api_v1.9.rst deleted file mode 100644 index db0e3bfdae..0000000000 --- a/docs/sources/reference/api/docker_remote_api_v1.9.rst +++ /dev/null @@ -1,1294 +0,0 @@ -:title: Remote API v1.9 -:description: API Documentation for Docker -:keywords: API, Docker, rcli, REST, documentation - -:orphan: - -====================== -Docker Remote API v1.9 -====================== - -1. Brief introduction -===================== - -- The Remote API has replaced rcli -- The daemon listens on ``unix:///var/run/docker.sock``, but you can - :ref:`bind_docker`. -- The API tends to be REST, but for some complex commands, like - ``attach`` or ``pull``, the HTTP connection is hijacked to transport - ``stdout, stdin`` and ``stderr`` - -2. Endpoints -============ - -2.1 Containers --------------- - -List containers -*************** - -.. http:get:: /containers/json - - List containers - - **Example request**: - - .. sourcecode:: http - - GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id": "8dfafdbc3a40", - "Image": "base:latest", - "Command": "echo 1", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "9cd87474be90", - "Image": "base:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "3176a2479c92", - "Image": "base:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - }, - { - "Id": "4cb07b47f9fb", - "Image": "base:latest", - "Command": "echo 444444444444444444444444444444444", - "Created": 1367854152, - "Status": "Exit 0", - "Ports":[], - "SizeRw":12288, - "SizeRootFs":0 - } - ] - - :query all: 1/True/true or 0/False/false, Show all containers. Only running containers are shown by default - :query limit: Show ``limit`` last created containers, include non-running ones. - :query since: Show only containers created since Id, include non-running ones. - :query before: Show only containers created before Id, include non-running ones. - :query size: 1/True/true or 0/False/false, Show the containers sizes - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 500: server error - - -Create a container -****************** - -.. http:post:: /containers/create - - Create a container - - **Example request**: - - .. sourcecode:: http - - POST /containers/create HTTP/1.1 - Content-Type: application/json - - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "CpuShares":0, - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "date" - ], - "Dns":null, - "Image":"base", - "Volumes":{ - "/tmp": {} - }, - "VolumesFrom":"", - "WorkingDir":"", - "ExposedPorts":{ - "22/tcp": {} - } - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/json - - { - "Id":"e90e34656806" - "Warnings":[] - } - - :jsonparam Hostname: Container host name - :jsonparam User: Username or UID - :jsonparam Memory: Memory Limit in bytes - :jsonparam CpuShares: CPU shares (relative weight) - :jsonparam AttachStdin: 1/True/true or 0/False/false, attach to standard input. Default false - :jsonparam AttachStdout: 1/True/true or 0/False/false, attach to standard output. Default false - :jsonparam AttachStderr: 1/True/true or 0/False/false, attach to standard error. Default false - :jsonparam Tty: 1/True/true or 0/False/false, allocate a pseudo-tty. Default false - :jsonparam OpenStdin: 1/True/true or 0/False/false, keep stdin open even if not attached. Default false - :query name: Assign the specified name to the container. Must match ``/?[a-zA-Z0-9_-]+``. - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 406: impossible to attach (container not running) - :statuscode 500: server error - - -Inspect a container -******************* - -.. http:get:: /containers/(id)/json - - Return low-level information on the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/json HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.041847+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Dns": null, - "Image": "base", - "Volumes": {}, - "VolumesFrom": "", - "WorkingDir":"" - - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:01360", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {}, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49153" - } - ] - }, - "Links": null, - "PublishAllPorts": false - } - } - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -List processes running inside a container -***************************************** - -.. http:get:: /containers/(id)/top - - List processes running inside the container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/top HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Titles":[ - "USER", - "PID", - "%CPU", - "%MEM", - "VSZ", - "RSS", - "TTY", - "STAT", - "START", - "TIME", - "COMMAND" - ], - "Processes":[ - ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"], - ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"] - ] - } - - :query ps_args: ps arguments to use (eg. aux) - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Inspect changes on a container's filesystem -******************************************* - -.. http:get:: /containers/(id)/changes - - Inspect changes on container ``id`` 's filesystem - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/changes HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } - ] - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Export a container -****************** - -.. http:get:: /containers/(id)/export - - Export the contents of container ``id`` - - **Example request**: - - .. sourcecode:: http - - GET /containers/4fa6e0f0c678/export HTTP/1.1 - - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Start a container -***************** - -.. http:post:: /containers/(id)/start - - Start the container ``id`` - - **Example request**: - - .. 
sourcecode:: http - - POST /containers/(id)/start HTTP/1.1 - Content-Type: application/json - - { - "Binds":["/tmp:/tmp"], - "LxcConf":{"lxc.utsname":"docker"}, - "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] }, - "PublishAllPorts":false, - "Privileged":false - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 No Content - Content-Type: text/plain - - :jsonparam Binds: Create a bind mount to a directory or file with [host-path]:[container-path]:[rw|ro]. If a directory "container-path" is missing, then docker creates a new volume. - :jsonparam LxcConf: Map of custom lxc options - :jsonparam PortBindings: Expose ports from the container, optionally publishing them via the HostPort flag - :jsonparam PublishAllPorts: 1/True/true or 0/False/false, publish all exposed ports to the host interfaces. Default false - :jsonparam Privileged: 1/True/true or 0/False/false, give extended privileges to this container. Default false - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Stop a container -**************** - -.. http:post:: /containers/(id)/stop - - Stop the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/stop?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Restart a container -******************* - -.. http:post:: /containers/(id)/restart - - Restart the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/restart?t=5 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query t: number of seconds to wait before killing the container - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Kill a container -**************** - -.. http:post:: /containers/(id)/kill - - Kill the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/e90e34656806/kill HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query signal: Signal to send to the container, must be integer or string (i.e. SIGINT). When not set, SIGKILL is assumed and the call will waits for the container to exit. - :statuscode 204: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Attach to a container -********************* - -.. http:post:: /containers/(id)/attach - - Attach to the container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - {{ STREAM }} - - :query logs: 1/True/true or 0/False/false, return logs. Default false - :query stream: 1/True/true or 0/False/false, return stream. Default false - :query stdin: 1/True/true or 0/False/false, if stream=true, attach to stdin. Default false - :query stdout: 1/True/true or 0/False/false, if logs=true, return stdout log, if stream=true, attach to stdout. Default false - :query stderr: 1/True/true or 0/False/false, if logs=true, return stderr log, if stream=true, attach to stderr. 
Default false - :statuscode 200: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - **Stream details**: - - When using the TTY setting is enabled in - :http:post:`/containers/create`, the stream is the raw data - from the process PTY and client's stdin. When the TTY is - disabled, then the stream is multiplexed to separate stdout - and stderr. - - The format is a **Header** and a **Payload** (frame). - - **HEADER** - - The header will contain the information on which stream write - the stream (stdout or stderr). It also contain the size of - the associated frame encoded on the last 4 bytes (uint32). - - It is encoded on the first 8 bytes like this:: - - header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} - - ``STREAM_TYPE`` can be: - - - 0: stdin (will be writen on stdout) - - 1: stdout - - 2: stderr - - ``SIZE1, SIZE2, SIZE3, SIZE4`` are the 4 bytes of the uint32 size encoded as big endian. - - **PAYLOAD** - - The payload is the raw stream. - - **IMPLEMENTATION** - - The simplest way to implement the Attach protocol is the following: - - 1) Read 8 bytes - 2) chose stdout or stderr depending on the first byte - 3) Extract the frame size from the last 4 byets - 4) Read the extracted size and output it on the correct output - 5) Goto 1) - - - -Wait a container -**************** - -.. http:post:: /containers/(id)/wait - - Block until container ``id`` stops, then returns the exit code - - **Example request**: - - .. sourcecode:: http - - POST /containers/16253994b7c4/wait HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"StatusCode":0} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Remove a container -******************* - -.. http:delete:: /containers/(id) - - Remove the container ``id`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /containers/16253994b7c4?v=1 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 204 OK - - :query v: 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false - :statuscode 204: no error - :statuscode 400: bad parameter - :statuscode 404: no such container - :statuscode 500: server error - - -Copy files or folders from a container -************************************** - -.. http:post:: /containers/(id)/copy - - Copy files or folders of container ``id`` - - **Example request**: - - .. sourcecode:: http - - POST /containers/4fa6e0f0c678/copy HTTP/1.1 - Content-Type: application/json - - { - "Resource":"test.txt" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/octet-stream - - {{ STREAM }} - - :statuscode 200: no error - :statuscode 404: no such container - :statuscode 500: server error - - -2.2 Images ----------- - -List Images -*********** - -.. http:get:: /images/json - - **Example request**: - - .. sourcecode:: http - - GET /images/json?all=0 HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "RepoTags": [ - "ubuntu:12.04", - "ubuntu:precise", - "ubuntu:latest" - ], - "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", - "Created": 1365714795, - "Size": 131506275, - "VirtualSize": 131506275 - }, - { - "RepoTags": [ - "ubuntu:12.10", - "ubuntu:quantal" - ], - "ParentId": "27cf784147099545", - "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Created": 1364102658, - "Size": 24653, - "VirtualSize": 180116135 - } - ] - - -Create an image -*************** - -.. http:post:: /images/create - - Create an image, either by pull it from the registry or by importing it - - **Example request**: - - .. sourcecode:: http - - POST /images/create?fromImage=base HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} - {"error":"Invalid..."} - ... - - When using this endpoint to pull an image from the registry, - the ``X-Registry-Auth`` header can be used to include a - base64-encoded AuthConfig object. - - :query fromImage: name of the image to pull - :query fromSrc: source to import, - means stdin - :query repo: repository - :query tag: tag - :query registry: the registry to pull from - :reqheader X-Registry-Auth: base64-encoded AuthConfig object - :statuscode 200: no error - :statuscode 500: server error - - - -Insert a file in an image -************************* - -.. http:post:: /images/(name)/insert - - Insert a file from ``url`` in the image ``name`` at ``path`` - - **Example request**: - - .. sourcecode:: http - - POST /images/test/insert?path=/usr&url=myurl HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Inserting..."} - {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}} - {"error":"Invalid..."} - ... - - :statuscode 200: no error - :statuscode 500: server error - - -Inspect an image -**************** - -.. http:get:: /images/(name)/json - - Return low-level information on the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/base/json HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "parent":"27cf784147099545", - "created":"2013-03-23T22:24:18.818426-07:00", - "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "container_config": - { - "Hostname":"", - "User":"", - "Memory":0, - "MemorySwap":0, - "AttachStdin":false, - "AttachStdout":false, - "AttachStderr":false, - "PortSpecs":null, - "Tty":true, - "OpenStdin":true, - "StdinOnce":false, - "Env":null, - "Cmd": ["/bin/bash"] - ,"Dns":null, - "Image":"base", - "Volumes":null, - "VolumesFrom":"", - "WorkingDir":"" - }, - "Size": 6824592 - } - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Get the history of an image -*************************** - -.. http:get:: /images/(name)/history - - Return the history of the image ``name`` - - **Example request**: - - .. sourcecode:: http - - GET /images/base/history HTTP/1.1 - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "Id":"b750fe79269d", - "Created":1364102658, - "CreatedBy":"/bin/bash" - }, - { - "Id":"27cf78414709", - "Created":1364068391, - "CreatedBy":"" - } - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Push an image on the registry -***************************** - -.. http:post:: /images/(name)/push - - Push the image ``name`` on the registry - - **Example request**: - - .. sourcecode:: http - - POST /images/test/push HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}} - {"error":"Invalid..."} - ... - - :query registry: the registry you wan to push, optional - :reqheader X-Registry-Auth: include a base64-encoded AuthConfig object. - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 500: server error - - -Tag an image into a repository -****************************** - -.. http:post:: /images/(name)/tag - - Tag the image ``name`` into a repository - - **Example request**: - - .. sourcecode:: http - - POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - - :query repo: The repository to tag in - :query force: 1/True/true or 0/False/false, default false - :statuscode 201: no error - :statuscode 400: bad parameter - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Remove an image -*************** - -.. http:delete:: /images/(name) - - Remove the image ``name`` from the filesystem - - **Example request**: - - .. sourcecode:: http - - DELETE /images/test HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-type: application/json - - [ - {"Untagged":"3e2f21a89f"}, - {"Deleted":"3e2f21a89f"}, - {"Deleted":"53b4f83ac9"} - ] - - :statuscode 200: no error - :statuscode 404: no such image - :statuscode 409: conflict - :statuscode 500: server error - - -Search images -************* - -.. http:get:: /images/search - - Search for an image in the docker index. - - .. note:: - - The response keys have changed from API v1.6 to reflect the JSON - sent by the registry server to the docker daemon's request. - - **Example request**: - - .. sourcecode:: http - - GET /images/search?term=sshd HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - [ - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "wma55/u1210sshd", - "star_count": 0 - }, - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "jdswinbank/sshd", - "star_count": 0 - }, - { - "description": "", - "is_official": false, - "is_trusted": false, - "name": "vgauthier/sshd", - "star_count": 0 - } - ... - ] - - :query term: term to search - :statuscode 200: no error - :statuscode 500: server error - - -2.3 Misc --------- - -Build an image from Dockerfile -****************************** - -.. http:post:: /build - - Build an image from Dockerfile using a POST body. - - **Example request**: - - .. sourcecode:: http - - POST /build HTTP/1.1 - - {{ STREAM }} - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"stream":"Step 1..."} - {"stream":"..."} - {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}} - - - The stream must be a tar archive compressed with one of the - following algorithms: identity (no compression), gzip, bzip2, - xz. - - The archive must include a file called ``Dockerfile`` at its - root. It may include any number of other files, which will be - accessible in the build context (See the :ref:`ADD build command - `). - - :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success - :query q: suppress verbose build output - :query nocache: do not use the cache when building the image - :query rm: Remove intermediate containers after a successful build - :reqheader Content-type: should be set to ``"application/tar"``. - :reqheader X-Registry-Config: base64-encoded ConfigFile object - :statuscode 200: no error - :statuscode 500: server error - - - -Check auth configuration -************************ - -.. http:post:: /auth - - Get the default username and email - - **Example request**: - - .. sourcecode:: http - - POST /auth HTTP/1.1 - Content-Type: application/json - - { - "username":"hannibal", - "password:"xxxx", - "email":"hannibal@a-team.com", - "serveraddress":"https://index.docker.io/v1/" - } - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 204: no error - :statuscode 500: server error - - -Display system-wide information -******************************* - -.. http:get:: /info - - Display system-wide information - - **Example request**: - - .. sourcecode:: http - - GET /info HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Containers":11, - "Images":16, - "Debug":false, - "NFd": 11, - "NGoroutines":21, - "MemoryLimit":true, - "SwapLimit":false, - "IPv4Forwarding":true - } - - :statuscode 200: no error - :statuscode 500: server error - - -Show the docker version information -*********************************** - -.. http:get:: /version - - Show the docker version information - - **Example request**: - - .. sourcecode:: http - - GET /version HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - { - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" - } - - :statuscode 200: no error - :statuscode 500: server error - - -Create a new image from a container's changes -********************************************* - -.. http:post:: /commit - - Create a new image from a container's changes - - **Example request**: - - .. sourcecode:: http - - POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Content-Type: application/vnd.docker.raw-stream - - {"Id":"596069db4bf5"} - - :query container: source container - :query repo: repository - :query tag: tag - :query m: commit message - :query author: author (eg. "John Hannibal Smith ") - :query run: config automatically applied when the image is run. (ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]}) - :statuscode 201: no error - :statuscode 404: no such container - :statuscode 500: server error - - -Monitor Docker's events -*********************** - -.. 
http:get:: /events - - Get events from docker, either in real time via streaming, or via polling (using `since`) - - **Example request**: - - .. sourcecode:: http - - GET /events?since=1374067924 - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/json - - {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} - {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} - {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} - {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} - - :query since: timestamp used for polling - :statuscode 200: no error - :statuscode 500: server error - -Get a tarball containing all images and tags in a repository -************************************************************ - -.. http:get:: /images/(name)/get - - Get a tarball containing all images and metadata for the repository specified by ``name``. - - **Example request** - - .. sourcecode:: http - - GET /images/ubuntu/get - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Content-Type: application/x-tar - - Binary data stream - - :statuscode 200: no error - :statuscode 500: server error - -Load a tarball with a set of images and tags into docker -******************************************************** - -.. http:post:: /images/load - - Load a set of images and tags into the docker repository. - - **Example request** - - .. sourcecode:: http - - POST /images/load - - Tarball in body - - **Example response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - - :statuscode 200: no error - :statuscode 500: server error - -3. Going further -================ - -3.1 Inside 'docker run' ------------------------ - -Here are the steps of 'docker run' : - -* Create the container -* If the status code is 404, it means the image doesn't exists: - * Try to pull it - * Then retry to create the container -* Start the container -* If you are not in detached mode: - * Attach to the container, using logs=1 (to have stdout and stderr from the container's start) and stream=1 -* If in detached mode or only stdin is attached: - * Display the container's id - - -3.2 Hijacking -------------- - -In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. - -3.3 CORS Requests ------------------ - -To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. - -.. code-block:: bash - - docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/index.rst b/docs/sources/reference/api/index.rst deleted file mode 100644 index 3c84a505c6..0000000000 --- a/docs/sources/reference/api/index.rst +++ /dev/null @@ -1,20 +0,0 @@ -:title: API Documentation -:description: docker documentation -:keywords: docker, ipa, documentation - -APIs -==== - -Your programs and scripts can access Docker's functionality via these interfaces: - -.. 
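Since ``/events`` streams one JSON object per event for as long as the connection stays open, a client has to decode the response body incrementally rather than all at once. Below is a hedged Go sketch of such a consumer; the daemon address and the ``since`` value are placeholders, not part of the API.

.. code-block:: go

   package main

   import (
       "encoding/json"
       "fmt"
       "io"
       "net/http"
   )

   // Event mirrors the fields shown in the example response above.
   type Event struct {
       Status string `json:"status"`
       ID     string `json:"id"`
       From   string `json:"from"`
       Time   int64  `json:"time"`
   }

   func main() {
       // Assumed daemon address; use whatever host:port you passed to -H.
       resp, err := http.Get("http://127.0.0.1:4243/events?since=1374067924")
       if err != nil {
           panic(err)
       }
       defer resp.Body.Close()

       // The body is a stream of concatenated JSON objects; decode them one by one.
       dec := json.NewDecoder(resp.Body)
       for {
           var e Event
           if err := dec.Decode(&e); err == io.EOF {
               break
           } else if err != nil {
               panic(err)
           }
           fmt.Printf("%d %s %s (%s)\n", e.Time, e.Status, e.ID, e.From)
       }
   }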
toctree:: - :maxdepth: 3 - - registry_index_spec - registry_api - index_api - docker_remote_api - remote_api_client_libraries - docker_io_oauth_api - docker_io_accounts_api - diff --git a/docs/sources/reference/api/index_api.rst b/docs/sources/reference/api/index_api.rst deleted file mode 100644 index 5191fc8992..0000000000 --- a/docs/sources/reference/api/index_api.rst +++ /dev/null @@ -1,556 +0,0 @@ -:title: Index API -:description: API Documentation for Docker Index -:keywords: API, Docker, index, REST, documentation - -================= -Docker Index API -================= - -1. Brief introduction -===================== - -- This is the REST API for the Docker index -- Authorization is done with basic auth over SSL -- Not all commands require authentication, only those noted as such. - -2. Endpoints -============ - -2.1 Repository -^^^^^^^^^^^^^^ - -Repositories -************* - -User Repo -~~~~~~~~~ - -.. http:put:: /v1/repositories/(namespace)/(repo_name)/ - - Create a user repository with the given ``namespace`` and ``repo_name``. - - **Example Request**: - - .. sourcecode:: http - - PUT /v1/repositories/foo/bar/ HTTP/1.1 - Host: index.docker.io - Accept: application/json - Content-Type: application/json - Authorization: Basic akmklmasadalkm== - X-Docker-Token: true - - [{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"}] - - :parameter namespace: the namespace for the repo - :parameter repo_name: the name for the repo - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - WWW-Authenticate: Token signature=123abc,repository="foo/bar",access=write - X-Docker-Token: signature=123abc,repository="foo/bar",access=write - X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io] - - "" - - :statuscode 200: Created - :statuscode 400: Errors (invalid json, missing or invalid fields, etc) - :statuscode 401: Unauthorized - :statuscode 403: Account is not Active - - -.. http:delete:: /v1/repositories/(namespace)/(repo_name)/ - - Delete a user repository with the given ``namespace`` and ``repo_name``. - - **Example Request**: - - .. sourcecode:: http - - DELETE /v1/repositories/foo/bar/ HTTP/1.1 - Host: index.docker.io - Accept: application/json - Content-Type: application/json - Authorization: Basic akmklmasadalkm== - X-Docker-Token: true - - "" - - :parameter namespace: the namespace for the repo - :parameter repo_name: the name for the repo - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 202 - Vary: Accept - Content-Type: application/json - WWW-Authenticate: Token signature=123abc,repository="foo/bar",access=delete - X-Docker-Token: signature=123abc,repository="foo/bar",access=delete - X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io] - - "" - - :statuscode 200: Deleted - :statuscode 202: Accepted - :statuscode 400: Errors (invalid json, missing or invalid fields, etc) - :statuscode 401: Unauthorized - :statuscode 403: Account is not Active - -Library Repo -~~~~~~~~~~~~ - -.. http:put:: /v1/repositories/(repo_name)/ - - Create a library repository with the given ``repo_name``. - This is a restricted feature only available to docker admins. - - When namespace is missing, it is assumed to be ``library`` - - **Example Request**: - - .. 
sourcecode:: http - - PUT /v1/repositories/foobar/ HTTP/1.1 - Host: index.docker.io - Accept: application/json - Content-Type: application/json - Authorization: Basic akmklmasadalkm== - X-Docker-Token: true - - [{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"}] - - :parameter repo_name: the library name for the repo - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - WWW-Authenticate: Token signature=123abc,repository="library/foobar",access=write - X-Docker-Token: signature=123abc,repository="foo/bar",access=write - X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io] - - "" - - :statuscode 200: Created - :statuscode 400: Errors (invalid json, missing or invalid fields, etc) - :statuscode 401: Unauthorized - :statuscode 403: Account is not Active - -.. http:delete:: /v1/repositories/(repo_name)/ - - Delete a library repository with the given ``repo_name``. - This is a restricted feature only available to docker admins. - - When namespace is missing, it is assumed to be ``library`` - - **Example Request**: - - .. sourcecode:: http - - DELETE /v1/repositories/foobar/ HTTP/1.1 - Host: index.docker.io - Accept: application/json - Content-Type: application/json - Authorization: Basic akmklmasadalkm== - X-Docker-Token: true - - "" - - :parameter repo_name: the library name for the repo - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 202 - Vary: Accept - Content-Type: application/json - WWW-Authenticate: Token signature=123abc,repository="library/foobar",access=delete - X-Docker-Token: signature=123abc,repository="foo/bar",access=delete - X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io] - - "" - - :statuscode 200: Deleted - :statuscode 202: Accepted - :statuscode 400: Errors (invalid json, missing or invalid fields, etc) - :statuscode 401: Unauthorized - :statuscode 403: Account is not Active - -Repository Images -***************** - -User Repo Images -~~~~~~~~~~~~~~~~ - -.. http:put:: /v1/repositories/(namespace)/(repo_name)/images - - Update the images for a user repo. - - **Example Request**: - - .. sourcecode:: http - - PUT /v1/repositories/foo/bar/images HTTP/1.1 - Host: index.docker.io - Accept: application/json - Content-Type: application/json - Authorization: Basic akmklmasadalkm== - - [{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", - "checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}] - - :parameter namespace: the namespace for the repo - :parameter repo_name: the name for the repo - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 204 - Vary: Accept - Content-Type: application/json - - "" - - :statuscode 204: Created - :statuscode 400: Errors (invalid json, missing or invalid fields, etc) - :statuscode 401: Unauthorized - :statuscode 403: Account is not Active or permission denied - - -.. http:get:: /v1/repositories/(namespace)/(repo_name)/images - - get the images for a user repo. - - **Example Request**: - - .. sourcecode:: http - - GET /v1/repositories/foo/bar/images HTTP/1.1 - Host: index.docker.io - Accept: application/json - - :parameter namespace: the namespace for the repo - :parameter repo_name: the name for the repo - - **Example Response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - - [{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", - "checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}, - {"id": "ertwetewtwe38722009fe6857087b486531f9a779a0c1dfddgfgsdgdsgds", - "checksum": "34t23f23fc17e3ed29dae8f12c4f9e89cc6f0bsdfgfsdgdsgdsgerwgew"}] - - :statuscode 200: OK - :statuscode 404: Not found - -Library Repo Images -~~~~~~~~~~~~~~~~~~~ - -.. http:put:: /v1/repositories/(repo_name)/images - - Update the images for a library repo. - - **Example Request**: - - .. sourcecode:: http - - PUT /v1/repositories/foobar/images HTTP/1.1 - Host: index.docker.io - Accept: application/json - Content-Type: application/json - Authorization: Basic akmklmasadalkm== - - [{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", - "checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}] - - :parameter repo_name: the library name for the repo - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 204 - Vary: Accept - Content-Type: application/json - - "" - - :statuscode 204: Created - :statuscode 400: Errors (invalid json, missing or invalid fields, etc) - :statuscode 401: Unauthorized - :statuscode 403: Account is not Active or permission denied - - -.. http:get:: /v1/repositories/(repo_name)/images - - get the images for a library repo. - - **Example Request**: - - .. sourcecode:: http - - GET /v1/repositories/foobar/images HTTP/1.1 - Host: index.docker.io - Accept: application/json - - :parameter repo_name: the library name for the repo - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - - [{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", - "checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}, - {"id": "ertwetewtwe38722009fe6857087b486531f9a779a0c1dfddgfgsdgdsgds", - "checksum": "34t23f23fc17e3ed29dae8f12c4f9e89cc6f0bsdfgfsdgdsgdsgerwgew"}] - - :statuscode 200: OK - :statuscode 404: Not found - - -Repository Authorization -************************ - -Library Repo -~~~~~~~~~~~~ - -.. http:put:: /v1/repositories/(repo_name)/auth - - authorize a token for a library repo - - **Example Request**: - - .. sourcecode:: http - - PUT /v1/repositories/foobar/auth HTTP/1.1 - Host: index.docker.io - Accept: application/json - Authorization: Token signature=123abc,repository="library/foobar",access=write - - :parameter repo_name: the library name for the repo - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - - "OK" - - :statuscode 200: OK - :statuscode 403: Permission denied - :statuscode 404: Not found - - -User Repo -~~~~~~~~~ - -.. http:put:: /v1/repositories/(namespace)/(repo_name)/auth - - authorize a token for a user repo - - **Example Request**: - - .. sourcecode:: http - - PUT /v1/repositories/foo/bar/auth HTTP/1.1 - Host: index.docker.io - Accept: application/json - Authorization: Token signature=123abc,repository="foo/bar",access=write - - :parameter namespace: the namespace for the repo - :parameter repo_name: the name for the repo - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - - "OK" - - :statuscode 200: OK - :statuscode 403: Permission denied - :statuscode 404: Not found - - -2.2 Users -^^^^^^^^^ - -User Login -********** - -.. 
http:get:: /v1/users - - If you want to check your login, you can try this endpoint - - **Example Request**: - - .. sourcecode:: http - - GET /v1/users HTTP/1.1 - Host: index.docker.io - Accept: application/json - Authorization: Basic akmklmasadalkm== - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 OK - Vary: Accept - Content-Type: application/json - - OK - - :statuscode 200: no error - :statuscode 401: Unauthorized - :statuscode 403: Account is not Active - - -User Register -************* - -.. http:post:: /v1/users - - Registering a new account. - - **Example request**: - - .. sourcecode:: http - - POST /v1/users HTTP/1.1 - Host: index.docker.io - Accept: application/json - Content-Type: application/json - - {"email": "sam@dotcloud.com", - "password": "toto42", - "username": "foobar"'} - - :jsonparameter email: valid email address, that needs to be confirmed - :jsonparameter username: min 4 character, max 30 characters, must match the regular expression [a-z0-9\_]. - :jsonparameter password: min 5 characters - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 201 OK - Vary: Accept - Content-Type: application/json - - "User Created" - - :statuscode 201: User Created - :statuscode 400: Errors (invalid json, missing or invalid fields, etc) - -Update User -*********** - -.. http:put:: /v1/users/(username)/ - - Change a password or email address for given user. If you pass in an email, - it will add it to your account, it will not remove the old one. Passwords will - be updated. - - It is up to the client to verify that that password that is sent is the one that - they want. Common approach is to have them type it twice. - - **Example Request**: - - .. sourcecode:: http - - PUT /v1/users/fakeuser/ HTTP/1.1 - Host: index.docker.io - Accept: application/json - Content-Type: application/json - Authorization: Basic akmklmasadalkm== - - {"email": "sam@dotcloud.com", - "password": "toto42"} - - :parameter username: username for the person you want to update - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 204 - Vary: Accept - Content-Type: application/json - - "" - - :statuscode 204: User Updated - :statuscode 400: Errors (invalid json, missing or invalid fields, etc) - :statuscode 401: Unauthorized - :statuscode 403: Account is not Active - :statuscode 404: User not found - - -2.3 Search -^^^^^^^^^^ -If you need to search the index, this is the endpoint you would use. - -Search -****** - -.. http:get:: /v1/search - - Search the Index given a search term. It accepts :http:method:`get` only. - - **Example request**: - - .. sourcecode:: http - - GET /v1/search?q=search_term HTTP/1.1 - Host: example.com - Accept: application/json - - - **Example response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 OK - Vary: Accept - Content-Type: application/json - - {"query":"search_term", - "num_results": 3, - "results" : [ - {"name": "ubuntu", "description": "An ubuntu image..."}, - {"name": "centos", "description": "A centos image..."}, - {"name": "fedora", "description": "A fedora image..."} - ] - } - - :query q: what you want to search for - :statuscode 200: no error - :statuscode 500: server error diff --git a/docs/sources/reference/api/registry_api.rst b/docs/sources/reference/api/registry_api.rst deleted file mode 100644 index b5c36cc344..0000000000 --- a/docs/sources/reference/api/registry_api.rst +++ /dev/null @@ -1,504 +0,0 @@ -:title: Registry API -:description: API Documentation for Docker Registry -:keywords: API, Docker, index, registry, REST, documentation - -=================== -Docker Registry API -=================== - - -1. Brief introduction -===================== - -- This is the REST API for the Docker Registry -- It stores the images and the graph for a set of repositories -- It does not have user accounts data -- It has no notion of user accounts or authorization -- It delegates authentication and authorization to the Index Auth service using tokens -- It supports different storage backends (S3, cloud files, local FS) -- It doesn’t have a local database -- It will be open-sourced at some point - -We expect that there will be multiple registries out there. To help to grasp -the context, here are some examples of registries: - -- **sponsor registry**: such a registry is provided by a third-party hosting infrastructure as a convenience for their customers and the docker community as a whole. Its costs are supported by the third party, but the management and operation of the registry are supported by dotCloud. It features read/write access, and delegates authentication and authorization to the Index. -- **mirror registry**: such a registry is provided by a third-party hosting infrastructure but is targeted at their customers only. Some mechanism (unspecified to date) ensures that public images are pulled from a sponsor registry to the mirror registry, to make sure that the customers of the third-party provider can “docker pull” those images locally. -- **vendor registry**: such a registry is provided by a software vendor, who wants to distribute docker images. It would be operated and managed by the vendor. Only users authorized by the vendor would be able to get write access. Some images would be public (accessible for anyone), others private (accessible only for authorized users). Authentication and authorization would be delegated to the Index. The goal of vendor registries is to let someone do “docker pull basho/riak1.3” and automatically push from the vendor registry (instead of a sponsor registry); i.e. get all the convenience of a sponsor registry, while retaining control on the asset distribution. -- **private registry**: such a registry is located behind a firewall, or protected by an additional security layer (HTTP authorization, SSL client-side certificates, IP address authorization...). The registry is operated by a private entity, outside of dotCloud’s control. It can optionally delegate additional authorization to the Index, but it is not mandatory. - -.. note:: - - Mirror registries and private registries which do not use the Index don’t even need to run the registry code. They can be implemented by any kind of transport implementing HTTP GET and PUT. Read-only registries can be powered by a simple static HTTP server. - -.. 
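As the note above points out, a read-only registry does not need the registry code at all; any static HTTP server answering GET on the ``/v1/`` paths will do. Here is a minimal Go sketch of that idea. The data directory and the listening port are assumptions, not part of the spec; the directory is expected to already contain files at the documented paths (``images/<id>/json``, ``images/<id>/layer``, ``repositories/<namespace>/<repo>/tags/<tag>``, and so on).

.. code-block:: go

   package main

   import (
       "log"
       "net/http"
   )

   func main() {
       // ./registry-data is assumed to hold a pre-populated v1 layout.
       fs := http.FileServer(http.Dir("./registry-data"))
       http.Handle("/v1/", http.StripPrefix("/v1/", fs))
       log.Fatal(http.ListenAndServe(":5000", nil))
   }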
note:: - - The latter implies that while HTTP is the protocol of choice for a registry, multiple schemes are possible (and in some cases, trivial): - - HTTP with GET (and PUT for read-write registries); - - local mount point; - - remote docker addressed through SSH. - -The latter would only require two new commands in docker, e.g. ``registryget`` -and ``registryput``, wrapping access to the local filesystem (and optionally -doing consistency checks). Authentication and authorization are then delegated -to SSH (e.g. with public keys). - -2. Endpoints -============ - -2.1 Images ----------- - -Layer -***** - -.. http:get:: /v1/images/(image_id)/layer - - get image layer for a given ``image_id`` - - **Example Request**: - - .. sourcecode:: http - - GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/layer HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - Authorization: Token signature=123abc,repository="foo/bar",access=read - - :parameter image_id: the id for the layer you want to get - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - X-Docker-Registry-Version: 0.6.0 - Cookie: (Cookie provided by the Registry) - - {layer binary data stream} - - :statuscode 200: OK - :statuscode 401: Requires authorization - :statuscode 404: Image not found - - -.. http:put:: /v1/images/(image_id)/layer - - put image layer for a given ``image_id`` - - **Example Request**: - - .. sourcecode:: http - - PUT /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/layer HTTP/1.1 - Host: registry-1.docker.io - Transfer-Encoding: chunked - Authorization: Token signature=123abc,repository="foo/bar",access=write - - {layer binary data stream} - - :parameter image_id: the id for the layer you want to get - - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - "" - - :statuscode 200: OK - :statuscode 401: Requires authorization - :statuscode 404: Image not found - - -Image -***** - -.. http:put:: /v1/images/(image_id)/json - - put image for a given ``image_id`` - - **Example Request**: - - .. sourcecode:: http - - PUT /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/json HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - Cookie: (Cookie provided by the Registry) - - { - id: "088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c", - parent: "aeee6396d62273d180a49c96c62e45438d87c7da4a5cf5d2be6bee4e21bc226f", - created: "2013-04-30T17:46:10.843673+03:00", - container: "8305672a76cc5e3d168f97221106ced35a76ec7ddbb03209b0f0d96bf74f6ef7", - container_config: { - Hostname: "host-test", - User: "", - Memory: 0, - MemorySwap: 0, - AttachStdin: false, - AttachStdout: false, - AttachStderr: false, - PortSpecs: null, - Tty: false, - OpenStdin: false, - StdinOnce: false, - Env: null, - Cmd: [ - "/bin/bash", - "-c", - "apt-get -q -yy -f install libevent-dev" - ], - Dns: null, - Image: "imagename/blah", - Volumes: { }, - VolumesFrom: "" - }, - docker_version: "0.1.7" - } - - :parameter image_id: the id for the layer you want to get - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - "" - - :statuscode 200: OK - :statuscode 401: Requires authorization - -.. 
http:get:: /v1/images/(image_id)/json - - get image for a given ``image_id`` - - **Example Request**: - - .. sourcecode:: http - - GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/json HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - Cookie: (Cookie provided by the Registry) - - :parameter image_id: the id for the layer you want to get - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - X-Docker-Size: 456789 - X-Docker-Checksum: b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087 - - { - id: "088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c", - parent: "aeee6396d62273d180a49c96c62e45438d87c7da4a5cf5d2be6bee4e21bc226f", - created: "2013-04-30T17:46:10.843673+03:00", - container: "8305672a76cc5e3d168f97221106ced35a76ec7ddbb03209b0f0d96bf74f6ef7", - container_config: { - Hostname: "host-test", - User: "", - Memory: 0, - MemorySwap: 0, - AttachStdin: false, - AttachStdout: false, - AttachStderr: false, - PortSpecs: null, - Tty: false, - OpenStdin: false, - StdinOnce: false, - Env: null, - Cmd: [ - "/bin/bash", - "-c", - "apt-get -q -yy -f install libevent-dev" - ], - Dns: null, - Image: "imagename/blah", - Volumes: { }, - VolumesFrom: "" - }, - docker_version: "0.1.7" - } - - :statuscode 200: OK - :statuscode 401: Requires authorization - :statuscode 404: Image not found - - -Ancestry -******** - -.. http:get:: /v1/images/(image_id)/ancestry - - get ancestry for an image given an ``image_id`` - - **Example Request**: - - .. sourcecode:: http - - GET /v1/images/088b4505aa3adc3d35e79c031fa126b403200f02f51920fbd9b7c503e87c7a2c/ancestry HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - Cookie: (Cookie provided by the Registry) - - :parameter image_id: the id for the layer you want to get - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - ["088b4502f51920fbd9b7c503e87c7a2c05aa3adc3d35e79c031fa126b403200f", - "aeee63968d87c7da4a5cf5d2be6bee4e21bc226fd62273d180a49c96c62e4543", - "bfa4c5326bc764280b0863b46a4b20d940bc1897ef9c1dfec060604bdc383280", - "6ab5893c6927c15a15665191f2c6cf751f5056d8b95ceee32e43c5e8a3648544"] - - :statuscode 200: OK - :statuscode 401: Requires authorization - :statuscode 404: Image not found - - -2.2 Tags --------- - -.. http:get:: /v1/repositories/(namespace)/(repository)/tags - - get all of the tags for the given repo. - - **Example Request**: - - .. sourcecode:: http - - GET /v1/repositories/foo/bar/tags HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - Cookie: (Cookie provided by the Registry) - - :parameter namespace: namespace for the repo - :parameter repository: name for the repo - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - { - "latest": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", - "0.1.1": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087" - } - - :statuscode 200: OK - :statuscode 401: Requires authorization - :statuscode 404: Repository not found - - -.. http:get:: /v1/repositories/(namespace)/(repository)/tags/(tag) - - get a tag for the given repo. 
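For reference, here is a hedged Go sketch of a client listing the tags documented above and printing the image id behind each one. The repository name and the token are placeholders; a real token (or cookie) is obtained through the authorization flow described in section 3.

.. code-block:: go

   package main

   import (
       "encoding/json"
       "fmt"
       "net/http"
   )

   func main() {
       // foo/bar and the token are placeholders.
       req, _ := http.NewRequest("GET", "https://registry-1.docker.io/v1/repositories/foo/bar/tags", nil)
       req.Header.Set("Authorization", `Token signature=123abc,repository="foo/bar",access=read`)

       resp, err := http.DefaultClient.Do(req)
       if err != nil {
           panic(err)
       }
       defer resp.Body.Close()

       // The response is a JSON object mapping tag names to image ids.
       tags := map[string]string{}
       if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil {
           panic(err)
       }
       for name, id := range tags {
           fmt.Printf("%s -> %s\n", name, id)
       }
   }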
- - **Example Request**: - - .. sourcecode:: http - - GET /v1/repositories/foo/bar/tags/latest HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - Cookie: (Cookie provided by the Registry) - - :parameter namespace: namespace for the repo - :parameter repository: name for the repo - :parameter tag: name of tag you want to get - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f" - - :statuscode 200: OK - :statuscode 401: Requires authorization - :statuscode 404: Tag not found - -.. http:delete:: /v1/repositories/(namespace)/(repository)/tags/(tag) - - delete the tag for the repo - - **Example Request**: - - .. sourcecode:: http - - DELETE /v1/repositories/foo/bar/tags/latest HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - Cookie: (Cookie provided by the Registry) - - :parameter namespace: namespace for the repo - :parameter repository: name for the repo - :parameter tag: name of tag you want to delete - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - "" - - :statuscode 200: OK - :statuscode 401: Requires authorization - :statuscode 404: Tag not found - - -.. http:put:: /v1/repositories/(namespace)/(repository)/tags/(tag) - - put a tag for the given repo. - - **Example Request**: - - .. sourcecode:: http - - PUT /v1/repositories/foo/bar/tags/latest HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - Cookie: (Cookie provided by the Registry) - - "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f" - - :parameter namespace: namespace for the repo - :parameter repository: name for the repo - :parameter tag: name of tag you want to add - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - "" - - :statuscode 200: OK - :statuscode 400: Invalid data - :statuscode 401: Requires authorization - :statuscode 404: Image not found - -2.3 Repositories ----------------- - -.. http:delete:: /v1/repositories/(namespace)/(repository)/ - - delete a repository - - **Example Request**: - - .. sourcecode:: http - - DELETE /v1/repositories/foo/bar/ HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - Cookie: (Cookie provided by the Registry) - - "" - - :parameter namespace: namespace for the repo - :parameter repository: name for the repo - - **Example Response**: - - .. sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - "" - - :statuscode 200: OK - :statuscode 401: Requires authorization - :statuscode 404: Repository not found - -2.4 Status ----------- - -.. http:get:: /v1/_ping - - Check status of the registry. This endpoint is also used to determine if - the registry supports SSL. - - **Example Request**: - - .. sourcecode:: http - - GET /v1/_ping HTTP/1.1 - Host: registry-1.docker.io - Accept: application/json - Content-Type: application/json - - "" - - **Example Response**: - - .. 
sourcecode:: http - - HTTP/1.1 200 - Vary: Accept - Content-Type: application/json - X-Docker-Registry-Version: 0.6.0 - - "" - - :statuscode 200: OK - - -3 Authorization -=============== -This is where we describe the authorization process, including the tokens and cookies. - -TODO: add more info. diff --git a/docs/sources/reference/api/registry_index_spec.rst b/docs/sources/reference/api/registry_index_spec.rst deleted file mode 100644 index 89f6319f5c..0000000000 --- a/docs/sources/reference/api/registry_index_spec.rst +++ /dev/null @@ -1,622 +0,0 @@ -:title: Registry Documentation -:description: Documentation for docker Registry and Registry API -:keywords: docker, registry, api, index - -.. _registryindexspec: - -===================== -Registry & Index Spec -===================== - -1. The 3 roles -=============== - -1.1 Index ---------- - -The Index is responsible for centralizing information about: - -- User accounts -- Checksums of the images -- Public namespaces - -The Index has different components: - -- Web UI -- Meta-data store (comments, stars, list public repositories) -- Authentication service -- Tokenization - -The index is authoritative for those information. - -We expect that there will be only one instance of the index, run and managed by Docker Inc. - -1.2 Registry ------------- -- It stores the images and the graph for a set of repositories -- It does not have user accounts data -- It has no notion of user accounts or authorization -- It delegates authentication and authorization to the Index Auth service using tokens -- It supports different storage backends (S3, cloud files, local FS) -- It doesn’t have a local database -- `Source Code `_ - -We expect that there will be multiple registries out there. To help to grasp the context, here are some examples of registries: - -- **sponsor registry**: such a registry is provided by a third-party hosting infrastructure as a convenience for their customers and the docker community as a whole. Its costs are supported by the third party, but the management and operation of the registry are supported by dotCloud. It features read/write access, and delegates authentication and authorization to the Index. -- **mirror registry**: such a registry is provided by a third-party hosting infrastructure but is targeted at their customers only. Some mechanism (unspecified to date) ensures that public images are pulled from a sponsor registry to the mirror registry, to make sure that the customers of the third-party provider can “docker pull” those images locally. -- **vendor registry**: such a registry is provided by a software vendor, who wants to distribute docker images. It would be operated and managed by the vendor. Only users authorized by the vendor would be able to get write access. Some images would be public (accessible for anyone), others private (accessible only for authorized users). Authentication and authorization would be delegated to the Index. The goal of vendor registries is to let someone do “docker pull basho/riak1.3” and automatically push from the vendor registry (instead of a sponsor registry); i.e. get all the convenience of a sponsor registry, while retaining control on the asset distribution. -- **private registry**: such a registry is located behind a firewall, or protected by an additional security layer (HTTP authorization, SSL client-side certificates, IP address authorization...). The registry is operated by a private entity, outside of dotCloud’s control. 
It can optionally delegate additional authorization to the Index, but it is not mandatory. - -.. note:: - - The latter implies that while HTTP is the protocol of choice for a registry, multiple schemes are possible (and in some cases, trivial): - - HTTP with GET (and PUT for read-write registries); - - local mount point; - - remote docker addressed through SSH. - -The latter would only require two new commands in docker, e.g. ``registryget`` -and ``registryput``, wrapping access to the local filesystem (and optionally -doing consistency checks). Authentication and authorization are then delegated -to SSH (e.g. with public keys). - -1.3 Docker ----------- - -On top of being a runtime for LXC, Docker is the Registry client. It supports: - -- Push / Pull on the registry -- Client authentication on the Index - -2. Workflow -=========== - -2.1 Pull --------- - -.. image:: /static_files/docker_pull_chart.png - -1. Contact the Index to know where I should download “samalba/busybox” -2. Index replies: - a. ``samalba/busybox`` is on Registry A - b. here are the checksums for ``samalba/busybox`` (for all layers) - c. token -3. Contact Registry A to receive the layers for ``samalba/busybox`` (all of them to the base image). Registry A is authoritative for “samalba/busybox” but keeps a copy of all inherited layers and serve them all from the same location. -4. registry contacts index to verify if token/user is allowed to download images -5. Index returns true/false lettings registry know if it should proceed or error out -6. Get the payload for all layers - -It's possible to run: - -.. code-block:: bash - - docker pull https:///repositories/samalba/busybox - -In this case, Docker bypasses the Index. However the security is not guaranteed -(in case Registry A is corrupted) because there won’t be any checksum checks. - -Currently registry redirects to s3 urls for downloads, going forward all -downloads need to be streamed through the registry. The Registry will then -abstract the calls to S3 by a top-level class which implements sub-classes for -S3 and local storage. - -Token is only returned when the ``X-Docker-Token`` header is sent with request. - -Basic Auth is required to pull private repos. Basic auth isn't required for -pulling public repos, but if one is provided, it needs to be valid and for an -active account. - -API (pulling repository foo/bar): -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1. (Docker -> Index) GET /v1/repositories/foo/bar/images - **Headers**: - Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== - X-Docker-Token: true - **Action**: - (looking up the foo/bar in db and gets images and checksums for that repo (all if no tag is specified, if tag, only checksums for those tags) see part 4.4.1) - -2. (Index -> Docker) HTTP 200 OK - - **Headers**: - - Authorization: Token signature=123abc,repository=”foo/bar”,access=write - - X-Docker-Endpoints: registry.docker.io [, registry2.docker.io] - **Body**: - Jsonified checksums (see part 4.4.1) - -3. (Docker -> Registry) GET /v1/repositories/foo/bar/tags/latest - **Headers**: - Authorization: Token signature=123abc,repository=”foo/bar”,access=write - -4. (Registry -> Index) GET /v1/repositories/foo/bar/images - - **Headers**: - Authorization: Token signature=123abc,repository=”foo/bar”,access=read - - **Body**: - - - **Action**: - ( Lookup token see if they have access to pull.) - - If good: - HTTP 200 OK - Index will invalidate the token - If bad: - HTTP 401 Unauthorized - -5. 
(Docker -> Registry) GET /v1/images/928374982374/ancestry - **Action**: - (for each image id returned in the registry, fetch /json + /layer) - -.. note:: - - If someone makes a second request, then we will always give a new token, never reuse tokens. - -2.2 Push --------- - -.. image:: /static_files/docker_push_chart.png - -1. Contact the index to allocate the repository name “samalba/busybox” (authentication required with user credentials) -2. If authentication works and namespace available, “samalba/busybox” is allocated and a temporary token is returned (namespace is marked as initialized in index) -3. Push the image on the registry (along with the token) -4. Registry A contacts the Index to verify the token (token must corresponds to the repository name) -5. Index validates the token. Registry A starts reading the stream pushed by docker and store the repository (with its images) -6. docker contacts the index to give checksums for upload images - -.. note:: - - **It’s possible not to use the Index at all!** In this case, a deployed version of the Registry is deployed to store and serve images. Those images are not authenticated and the security is not guaranteed. - -.. note:: - - **Index can be replaced!** For a private Registry deployed, a custom Index can be used to serve and validate token according to different policies. - -Docker computes the checksums and submit them to the Index at the end of the -push. When a repository name does not have checksums on the Index, it means -that the push is in progress (since checksums are submitted at the end). - -API (pushing repos foo/bar): -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1. (Docker -> Index) PUT /v1/repositories/foo/bar/ - **Headers**: - Authorization: Basic sdkjfskdjfhsdkjfh== - X-Docker-Token: true - - **Action**:: - - in index, we allocated a new repository, and set to initialized - - **Body**:: - (The body contains the list of images that are going to be pushed, with empty checksums. The checksums will be set at the end of the push):: - - [{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”}] - -2. (Index -> Docker) 200 Created - **Headers**: - - WWW-Authenticate: Token signature=123abc,repository=”foo/bar”,access=write - - X-Docker-Endpoints: registry.docker.io [, registry2.docker.io] - -3. (Docker -> Registry) PUT /v1/images/98765432_parent/json - **Headers**: - Authorization: Token signature=123abc,repository=”foo/bar”,access=write - -4. (Registry->Index) GET /v1/repositories/foo/bar/images - **Headers**: - Authorization: Token signature=123abc,repository=”foo/bar”,access=write - **Action**:: - - Index: - will invalidate the token. - - Registry: - grants a session (if token is approved) and fetches the images id - -5. (Docker -> Registry) PUT /v1/images/98765432_parent/json - **Headers**:: - - Authorization: Token signature=123abc,repository=”foo/bar”,access=write - - Cookie: (Cookie provided by the Registry) - -6. (Docker -> Registry) PUT /v1/images/98765432/json - **Headers**: - Cookie: (Cookie provided by the Registry) - -7. (Docker -> Registry) PUT /v1/images/98765432_parent/layer - **Headers**: - Cookie: (Cookie provided by the Registry) - -8. (Docker -> Registry) PUT /v1/images/98765432/layer - **Headers**: - X-Docker-Checksum: sha256:436745873465fdjkhdfjkgh - -9. (Docker -> Registry) PUT /v1/repositories/foo/bar/tags/latest - **Headers**: - Cookie: (Cookie provided by the Registry) - **Body**: - “98765432” - -10. 
(Docker -> Index) PUT /v1/repositories/foo/bar/images - - **Headers**: - Authorization: Basic 123oislifjsldfj== - X-Docker-Endpoints: registry1.docker.io (no validation on this right now) - - **Body**: - (The image, id’s, tags and checksums) - - [{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”, - “checksum”: “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”}] - - **Return** HTTP 204 - -.. note:: - - If push fails and they need to start again, what happens in the index, there will already be a record for the namespace/name, but it will be initialized. Should we allow it, or mark as name already used? One edge case could be if someone pushes the same thing at the same time with two different shells. - - If it's a retry on the Registry, Docker has a cookie (provided by the registry after token validation). So the Index won’t have to provide a new token. - -2.3 Delete ----------- - -If you need to delete something from the index or registry, we need a nice -clean way to do that. Here is the workflow. - -1. Docker contacts the index to request a delete of a repository ``samalba/busybox`` (authentication required with user credentials) -2. If authentication works and repository is valid, ``samalba/busybox`` is marked as deleted and a temporary token is returned -3. Send a delete request to the registry for the repository (along with the token) -4. Registry A contacts the Index to verify the token (token must corresponds to the repository name) -5. Index validates the token. Registry A deletes the repository and everything associated to it. -6. docker contacts the index to let it know it was removed from the registry, the index removes all records from the database. - -.. note:: - - The Docker client should present an "Are you sure?" prompt to confirm the deletion before starting the process. Once it starts it can't be undone. - -API (deleting repository foo/bar): -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -1. (Docker -> Index) DELETE /v1/repositories/foo/bar/ - **Headers**: - Authorization: Basic sdkjfskdjfhsdkjfh== - X-Docker-Token: true - - **Action**:: - - in index, we make sure it is a valid repository, and set to deleted (logically) - - **Body**:: - Empty - -2. (Index -> Docker) 202 Accepted - **Headers**: - - WWW-Authenticate: Token signature=123abc,repository=”foo/bar”,access=delete - - X-Docker-Endpoints: registry.docker.io [, registry2.docker.io] # list of endpoints where this repo lives. - -3. (Docker -> Registry) DELETE /v1/repositories/foo/bar/ - **Headers**: - Authorization: Token signature=123abc,repository=”foo/bar”,access=delete - -4. (Registry->Index) PUT /v1/repositories/foo/bar/auth - **Headers**: - Authorization: Token signature=123abc,repository=”foo/bar”,access=delete - **Action**:: - - Index: - will invalidate the token. - - Registry: - deletes the repository (if token is approved) - -5. (Registry -> Docker) 200 OK - 200 If success - 403 if forbidden - 400 if bad request - 404 if repository isn't found - -6. (Docker -> Index) DELETE /v1/repositories/foo/bar/ - - **Headers**: - Authorization: Basic 123oislifjsldfj== - X-Docker-Endpoints: registry-1.docker.io (no validation on this right now) - - **Body**: - Empty - - **Return** HTTP 200 - - -3. 
How to use the Registry in standalone mode -============================================= - -The Index has two main purposes (along with its fancy social features): - -- Resolve short names (to avoid passing absolute URLs all the time) - - username/projectname -> \https://registry.docker.io/users//repositories// - - team/projectname -> \https://registry.docker.io/team//repositories// -- Authenticate a user as a repos owner (for a central referenced repository) - -3.1 Without an Index --------------------- - -Using the Registry without the Index can be useful to store the images on a -private network without having to rely on an external entity controlled by -Docker Inc. - -In this case, the registry will be launched in a special mode (--standalone? ---no-index?). In this mode, the only thing which changes is that Registry will -never contact the Index to verify a token. It will be the Registry owner -responsibility to authenticate the user who pushes (or even pulls) an image -using any mechanism (HTTP auth, IP based, etc...). - -In this scenario, the Registry is responsible for the security in case of data -corruption since the checksums are not delivered by a trusted entity. - -As hinted previously, a standalone registry can also be implemented by any HTTP -server handling GET/PUT requests (or even only GET requests if no write access -is necessary). - -3.2 With an Index ------------------ - -The Index data needed by the Registry are simple: - -- Serve the checksums -- Provide and authorize a Token - -In the scenario of a Registry running on a private network with the need of -centralizing and authorizing, it’s easy to use a custom Index. - -The only challenge will be to tell Docker to contact (and trust) this custom -Index. Docker will be configurable at some point to use a specific Index, it’ll -be the private entity responsibility (basically the organization who uses -Docker in a private environment) to maintain the Index and the Docker’s -configuration among its consumers. - -4. The API -========== - -The first version of the api is available here: https://github.com/jpetazzo/docker/blob/acd51ecea8f5d3c02b00a08176171c59442df8b3/docs/images-repositories-push-pull.md - -4.1 Images ----------- - -The format returned in the images is not defined here (for layer and JSON), -basically because Registry stores exactly the same kind of information as -Docker uses to manage them. - -The format of ancestry is a line-separated list of image ids, in age order, -i.e. the image’s parent is on the last line, the parent of the parent on the -next-to-last line, etc.; if the image has no parent, the file is empty. - -.. code-block:: bash - - GET /v1/images//layer - PUT /v1/images//layer - GET /v1/images//json - PUT /v1/images//json - GET /v1/images//ancestry - PUT /v1/images//ancestry - -4.2 Users ---------- - -4.2.1 Create a user (Index) -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -POST /v1/users - -**Body**: - {"email": "sam@dotcloud.com", "password": "toto42", "username": "foobar"'} - -**Validation**: - - **username**: min 4 character, max 30 characters, must match the regular - expression [a-z0-9\_]. - - **password**: min 5 characters - -**Valid**: return HTTP 200 - -Errors: HTTP 400 (we should create error codes for possible errors) -- invalid json -- missing field -- wrong format (username, password, email, etc) -- forbidden name -- name already exists - -.. note:: - - A user account will be valid only if the email has been validated (a validation link is sent to the email address). 
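To make the user-creation call above concrete, here is a short Go sketch that posts the documented JSON body to the Index and checks the outcome. The credentials are placeholders; the validation rules are the ones listed in 4.2.1.

.. code-block:: go

   package main

   import (
       "bytes"
       "encoding/json"
       "fmt"
       "net/http"
   )

   func main() {
       // Placeholder account: username must match [a-z0-9_] (4-30 chars),
       // password must be at least 5 characters.
       body, _ := json.Marshal(map[string]string{
           "email":    "sam@example.com",
           "password": "toto42",
           "username": "foobar",
       })

       resp, err := http.Post("https://index.docker.io/v1/users", "application/json", bytes.NewReader(body))
       if err != nil {
           panic(err)
       }
       defer resp.Body.Close()

       switch resp.StatusCode {
       case 200, 201:
           fmt.Println("user created; the account becomes valid once the email is confirmed")
       default:
           fmt.Println("registration rejected:", resp.Status)
       }
   }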
- -4.2.2 Update a user (Index) -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -PUT /v1/users/ - -**Body**: - {"password": "toto"} - -.. note:: - - We can also update email address, if they do, they will need to reverify their new email address. - -4.2.3 Login (Index) -^^^^^^^^^^^^^^^^^^^ - -Does nothing else but asking for a user authentication. Can be used to validate -credentials. HTTP Basic Auth for now, maybe change in future. - -GET /v1/users - -**Return**: - - Valid: HTTP 200 - - Invalid login: HTTP 401 - - Account inactive: HTTP 403 Account is not Active - -4.3 Tags (Registry) -------------------- - -The Registry does not know anything about users. Even though repositories are -under usernames, it’s just a namespace for the registry. Allowing us to -implement organizations or different namespaces per user later, without -modifying the Registry’s API. - -The following naming restrictions apply: - -- Namespaces must match the same regular expression as usernames (See 4.2.1.) -- Repository names must match the regular expression [a-zA-Z0-9-_.] - -4.3.1 Get all tags -^^^^^^^^^^^^^^^^^^ - -GET /v1/repositories///tags - -**Return**: HTTP 200 - { - "latest": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", - “0.1.1”: “b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087” - } - -4.3.2 Read the content of a tag (resolve the image id) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -GET /v1/repositories///tags/ - -**Return**: - "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f" - -4.3.3 Delete a tag (registry) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -DELETE /v1/repositories///tags/ - -4.4 Images (Index) ------------------- - -For the Index to “resolve” the repository name to a Registry location, it uses -the X-Docker-Endpoints header. In other terms, this requests always add a -``X-Docker-Endpoints`` to indicate the location of the registry which hosts this -repository. - -4.4.1 Get the images -^^^^^^^^^^^^^^^^^^^^^ - -GET /v1/repositories///images - -**Return**: HTTP 200 - [{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”, “checksum”: “md5:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”}] - - -4.4.2 Add/update the images -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You always add images, you never remove them. - -PUT /v1/repositories///images - -**Body**: - [ {“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”, “checksum”: “sha256:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”} ] - -**Return** 204 - -4.5 Repositories ----------------- - -4.5.1 Remove a Repository (Registry) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -DELETE /v1/repositories// - -Return 200 OK - -4.5.2 Remove a Repository (Index) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -This starts the delete process. see 2.3 for more details. - -DELETE /v1/repositories// - -Return 202 OK - -5. Chaining Registries -====================== - -It’s possible to chain Registries server for several reasons: - -- Load balancing -- Delegate the next request to another server - -When a Registry is a reference for a repository, it should host the entire -images chain in order to avoid breaking the chain during the download. - -The Index and Registry use this mechanism to redirect on one or the other. - -Example with an image download: - -On every request, a special header can be returned:: - - X-Docker-Endpoints: server1,server2 - -On the next request, the client will always pick a server from this list. - -6. 
Authentication & Authorization -================================= - -6.1 On the Index ------------------ - -The Index supports both “Basic” and “Token” challenges. Usually when there is a -``401 Unauthorized``, the Index replies this:: - - 401 Unauthorized - WWW-Authenticate: Basic realm="auth required",Token - -You have 3 options: - -1. Provide user credentials and ask for a token - - **Header**: - - Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== - - X-Docker-Token: true - - In this case, along with the 200 response, you’ll get a new token (if user auth is ok): - If authorization isn't correct you get a 401 response. - If account isn't active you will get a 403 response. - - **Response**: - - 200 OK - - X-Docker-Token: Token signature=123abc,repository=”foo/bar”,access=read - -2. Provide user credentials only - - **Header**: - Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== - -3. Provide Token - - **Header**: - Authorization: Token signature=123abc,repository=”foo/bar”,access=read - -6.2 On the Registry -------------------- - -The Registry only supports the Token challenge:: - - 401 Unauthorized - WWW-Authenticate: Token - -The only way is to provide a token on ``401 Unauthorized`` responses:: - - Authorization: Token signature=123abc,repository="foo/bar",access=read - -Usually, the Registry provides a Cookie when a Token verification succeeded. -Every time the Registry passes a Cookie, you have to pass it back the same -cookie.:: - - 200 OK - Set-Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=×tamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="; Path=/; HttpOnly - -Next request:: - - GET /(...) - Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=×tamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4=" - - -7 Document Version -==================== - -- 1.0 : May 6th 2013 : initial release -- 1.1 : June 1st 2013 : Added Delete Repository and way to handle new source namespace. diff --git a/docs/sources/reference/api/remote_api_client_libraries.rst b/docs/sources/reference/api/remote_api_client_libraries.rst deleted file mode 100644 index c94c68cd48..0000000000 --- a/docs/sources/reference/api/remote_api_client_libraries.rst +++ /dev/null @@ -1,55 +0,0 @@ -:title: Remote API Client Libraries -:description: Various client libraries available to use with the Docker remote API -:keywords: API, Docker, index, registry, REST, documentation, clients, Python, Ruby, JavaScript, Erlang, Go - - -================================== -Docker Remote API Client Libraries -================================== - -These libraries have not been tested by the Docker Maintainers for -compatibility. Please file issues with the library owners. If you -find more library implementations, please list them in Docker doc bugs -and we will add the libraries here. 
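Most of these libraries implement the token exchange described in section 6 above. For reference, here is a hedged Go sketch of that handshake: Basic auth plus ``X-Docker-Token: true`` against the Index, then the returned token passed verbatim to the Registry. The repository name and credentials are placeholders.

.. code-block:: go

   package main

   import (
       "fmt"
       "net/http"
   )

   func main() {
       // Ask the Index for a token while looking up the (placeholder) repository.
       req, _ := http.NewRequest("GET", "https://index.docker.io/v1/repositories/foo/bar/images", nil)
       req.SetBasicAuth("username", "password")
       req.Header.Set("X-Docker-Token", "true")

       resp, err := http.DefaultClient.Do(req)
       if err != nil {
           panic(err)
       }
       resp.Body.Close()

       token := resp.Header.Get("X-Docker-Token")
       endpoints := resp.Header.Get("X-Docker-Endpoints")
       fmt.Println("token:", token)
       fmt.Println("endpoints:", endpoints)

       // A real client would pick a host from X-Docker-Endpoints;
       // the token is handed back to the Registry unchanged.
       regReq, _ := http.NewRequest("GET", "https://registry-1.docker.io/v1/repositories/foo/bar/tags", nil)
       regReq.Header.Set("Authorization", "Token "+token)
       regResp, err := http.DefaultClient.Do(regReq)
       if err != nil {
           panic(err)
       }
       regResp.Body.Close()
       fmt.Println("registry answered:", regResp.Status)
   }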
- -+----------------------+----------------+--------------------------------------------+----------+ -| Language/Framework | Name | Repository | Status | -+======================+================+============================================+==========+ -| Python | docker-py | https://github.com/dotcloud/docker-py | Active | -+----------------------+----------------+--------------------------------------------+----------+ -| Ruby | docker-client | https://github.com/geku/docker-client | Outdated | -+----------------------+----------------+--------------------------------------------+----------+ -| Ruby | docker-api | https://github.com/swipely/docker-api | Active | -+----------------------+----------------+--------------------------------------------+----------+ -| JavaScript (NodeJS) | dockerode | https://github.com/apocas/dockerode | Active | -| | | Install via NPM: `npm install dockerode` | | -+----------------------+----------------+--------------------------------------------+----------+ -| JavaScript (NodeJS) | docker.io | https://github.com/appersonlabs/docker.io | Active | -| | | Install via NPM: `npm install docker.io` | | -+----------------------+----------------+--------------------------------------------+----------+ -| JavaScript | docker-js | https://github.com/dgoujard/docker-js | Outdated | -+----------------------+----------------+--------------------------------------------+----------+ -| JavaScript (Angular) | docker-cp | https://github.com/13W/docker-cp | Active | -| **WebUI** | | | | -+----------------------+----------------+--------------------------------------------+----------+ -| JavaScript (Angular) | dockerui | https://github.com/crosbymichael/dockerui | Active | -| **WebUI** | | | | -+----------------------+----------------+--------------------------------------------+----------+ -| Java | docker-java | https://github.com/kpelykh/docker-java | Active | -+----------------------+----------------+--------------------------------------------+----------+ -| Erlang | erldocker | https://github.com/proger/erldocker | Active | -+----------------------+----------------+--------------------------------------------+----------+ -| Go | go-dockerclient| https://github.com/fsouza/go-dockerclient | Active | -+----------------------+----------------+--------------------------------------------+----------+ -| Go | dockerclient | https://github.com/samalba/dockerclient | Active | -+----------------------+----------------+--------------------------------------------+----------+ -| PHP | Alvine | http://pear.alvine.io/ (alpha) | Active | -+----------------------+----------------+--------------------------------------------+----------+ -| PHP | Docker-PHP | http://stage1.github.io/docker-php/ | Active | -+----------------------+----------------+--------------------------------------------+----------+ -| Perl | Net::Docker | https://metacpan.org/pod/Net::Docker | Active | -+----------------------+----------------+--------------------------------------------+----------+ -| Perl | Eixo::Docker | https://github.com/alambike/eixo-docker | Active | -+----------------------+----------------+--------------------------------------------+----------+ -| Scala | reactive-docker| https://github.com/almoehi/reactive-docker | Active | -+----------------------+----------------+--------------------------------------------+----------+ diff --git a/docs/sources/reference/builder.rst b/docs/sources/reference/builder.rst deleted file mode 100644 index e8897d1b09..0000000000 --- 
a/docs/sources/reference/builder.rst +++ /dev/null @@ -1,532 +0,0 @@ -:title: Dockerfile Reference -:description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image. -:keywords: builder, docker, Dockerfile, automation, image creation - -.. _dockerbuilder: - -==================== -Dockerfile Reference -==================== - -**Docker can act as a builder** and read instructions from a text -``Dockerfile`` to automate the steps you would otherwise take manually -to create an image. Executing ``docker build`` will run your steps and -commit them along the way, giving you a final image. - -.. _dockerfile_usage: - -Usage -===== - -To :ref:`build ` an image from a source repository, create -a description file called ``Dockerfile`` at the root of your -repository. This file will describe the steps to assemble the image. - -Then call ``docker build`` with the path of your source repository as -argument (for example, ``.``): - - ``sudo docker build .`` - -The path to the source repository defines where to find the *context* -of the build. The build is run by the Docker daemon, not by the CLI, -so the whole context must be transferred to the daemon. The Docker CLI -reports "Uploading context" when the context is sent to the daemon. - -You can specify a repository and tag at which to save the new image if the -build succeeds: - - ``sudo docker build -t shykes/myapp .`` - -The Docker daemon will run your steps one-by-one, committing the -result to a new image if necessary, before finally outputting the -ID of your new image. The Docker daemon will automatically clean -up the context you sent. - -Note that each instruction is run independently, and causes a new image -to be created - so ``RUN cd /tmp`` will not have any effect on the next -instructions. - -Whenever possible, Docker will re-use the intermediate images, -accelerating ``docker build`` significantly (indicated by ``Using cache``): - -.. code-block:: bash - - $ docker build -t SvenDowideit/ambassador . - Uploading context 10.24 kB - Uploading context - Step 1 : FROM docker-ut - ---> cbba202fe96b - Step 2 : MAINTAINER SvenDowideit@home.org.au - ---> Using cache - ---> 51182097be13 - Step 3 : CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top - ---> Using cache - ---> 1a5ffc17324d - Successfully built 1a5ffc17324d - -When you're done with your build, you're ready to look into -:ref:`image_push`. - -.. _dockerfile_format: - -Format -====== - -Here is the format of the Dockerfile: - -:: - - # Comment - INSTRUCTION arguments - -The Instruction is not case-sensitive, however convention is for them to be -UPPERCASE in order to distinguish them from arguments more easily. - -Docker evaluates the instructions in a Dockerfile in order. **The -first instruction must be `FROM`** in order to specify the -:ref:`base_image_def` from which you are building. - -Docker will treat lines that *begin* with ``#`` as a comment. A ``#`` -marker anywhere else in the line will be treated as an argument. This -allows statements like: - -:: - - # Comment - RUN echo 'we are running some # of cool things' - -.. _dockerfile_instructions: - - -Here is the set of instructions you can use in a ``Dockerfile`` for -building images. - -.. _dockerfile_from: - -``FROM`` -======== - - ``FROM `` - -Or - - ``FROM :`` - -The ``FROM`` instruction sets the :ref:`base_image_def` for subsequent -instructions. 
As such, a valid Dockerfile must have ``FROM`` as its -first instruction. The image can be any valid image -- it is -especially easy to start by **pulling an image** from the -:ref:`using_public_repositories`. - -``FROM`` must be the first non-comment instruction in the -``Dockerfile``. - -``FROM`` can appear multiple times within a single Dockerfile in order -to create multiple images. Simply make a note of the last image id -output by the commit before each new ``FROM`` command. - -If no ``tag`` is given to the ``FROM`` instruction, ``latest`` is -assumed. If the used tag does not exist, an error will be returned. - -.. _dockerfile_maintainer: - -``MAINTAINER`` -============== - - ``MAINTAINER `` - -The ``MAINTAINER`` instruction allows you to set the *Author* field of -the generated images. - -.. _dockerfile_run: - -``RUN`` -======= - -RUN has 2 forms: - -* ``RUN `` (the command is run in a shell - ``/bin/sh -c``) -* ``RUN ["executable", "param1", "param2"]`` (*exec* form) - -The ``RUN`` instruction will execute any commands in a new layer on top -of the current image and commit the results. The resulting committed image -will be used for the next step in the Dockerfile. - -Layering ``RUN`` instructions and generating commits conforms to the -core concepts of Docker where commits are cheap and containers can be -created from any point in an image's history, much like source -control. - -The *exec* form makes it possible to avoid shell string munging, and to ``RUN`` -commands using a base image that does not contain ``/bin/sh``. - -Known Issues (RUN) -.................. - -* :issue:`783` is about file permissions problems that can occur when - using the AUFS file system. You might notice it during an attempt to - ``rm`` a file, for example. The issue describes a workaround. -* :issue:`2424` Locale will not be set automatically. - -.. _dockerfile_cmd: - -``CMD`` -======= - -CMD has three forms: - -* ``CMD ["executable","param1","param2"]`` (like an *exec*, preferred form) -* ``CMD ["param1","param2"]`` (as *default parameters to ENTRYPOINT*) -* ``CMD command param1 param2`` (as a *shell*) - -There can only be one CMD in a Dockerfile. If you list more than one -CMD then only the last CMD will take effect. - -**The main purpose of a CMD is to provide defaults for an executing -container.** These defaults can include an executable, or they can -omit the executable, in which case you must specify an ENTRYPOINT as -well. - -When used in the shell or exec formats, the ``CMD`` instruction sets -the command to be executed when running the image. - -If you use the *shell* form of the CMD, then the ```` will -execute in ``/bin/sh -c``: - -.. code-block:: bash - - FROM ubuntu - CMD echo "This is a test." | wc - - -If you want to **run your** ```` **without a shell** then you -must express the command as a JSON array and give the full path to the -executable. **This array form is the preferred format of CMD.** Any -additional parameters must be individually expressed as strings in the -array: - -.. code-block:: bash - - FROM ubuntu - CMD ["/usr/bin/wc","--help"] - -If you would like your container to run the same executable every -time, then you should consider using ``ENTRYPOINT`` in combination -with ``CMD``. See :ref:`dockerfile_entrypoint`. - -If the user specifies arguments to ``docker run`` then they will -override the default specified in CMD. - -.. note:: - Don't confuse ``RUN`` with ``CMD``. 
``RUN`` actually runs a - command and commits the result; ``CMD`` does not execute anything at - build time, but specifies the intended command for the image. - -.. _dockerfile_expose: - -``EXPOSE`` -========== - - ``EXPOSE [...]`` - -The ``EXPOSE`` instructions informs Docker that the container will listen -on the specified network ports at runtime. Docker uses this information -to interconnect containers using links (see :ref:`links `), -and to setup port redirection on the host system (see :ref:`port_redirection`). - -.. _dockerfile_env: - -``ENV`` -======= - - ``ENV `` - -The ``ENV`` instruction sets the environment variable ```` to the -value ````. This value will be passed to all future ``RUN`` -instructions. This is functionally equivalent to prefixing the command -with ``=`` - -The environment variables set using ``ENV`` will persist when a container is run -from the resulting image. You can view the values using ``docker inspect``, and change them using ``docker run --env =``. - -.. note:: - One example where this can cause unexpected consequenses, is setting - ``ENV DEBIAN_FRONTEND noninteractive``. - Which will persist when the container is run interactively; for example: - ``docker run -t -i image bash`` - -.. _dockerfile_add: - -``ADD`` -======= - - ``ADD `` - -The ``ADD`` instruction will copy new files from and add them to -the container's filesystem at path ````. - -```` must be the path to a file or directory relative to the -source directory being built (also called the *context* of the build) or -a remote file URL. - -```` is the absolute path to which the source will be copied inside the -destination container. - -All new files and directories are created with mode 0755, uid and gid -0. - -.. note:: - if you build using STDIN (``docker build - < somefile``), there is no build - context, so the Dockerfile can only contain an URL based ADD statement. - -.. note:: - if your URL files are protected using authentication, you will need to use - an ``RUN wget`` , ``RUN curl`` or other tool from within the container as - ADD does not support authentication. - -The copy obeys the following rules: - -* The ```` path must be inside the *context* of the build; you cannot - ``ADD ../something /something``, because the first step of a - ``docker build`` is to send the context directory (and subdirectories) to - the docker daemon. -* If ```` is a URL and ```` does not end with a trailing slash, - then a file is downloaded from the URL and copied to ````. -* If ```` is a URL and ```` does end with a trailing slash, - then the filename is inferred from the URL and the file is downloaded to - ``/``. For instance, ``ADD http://example.com/foobar /`` - would create the file ``/foobar``. The URL must have a nontrivial path - so that an appropriate filename can be discovered in this case - (``http://example.com`` will not work). -* If ```` is a directory, the entire directory is copied, - including filesystem metadata. -* If ```` is a *local* tar archive in a recognized compression - format (identity, gzip, bzip2 or xz) then it is unpacked as a - directory. Resources from *remote* URLs are **not** decompressed. - - When a directory is copied or unpacked, it has the same behavior as - ``tar -x``: the result is the union of - - 1. whatever existed at the destination path and - 2. the contents of the source tree, - - with conflicts resolved in favor of "2." on a file-by-file basis. - -* If ```` is any other kind of file, it is copied individually - along with its metadata. 
In this case, if ```` ends with a - trailing slash ``/``, it will be considered a directory and the - contents of ```` will be written at ``/base()``. -* If ```` does not end with a trailing slash, it will be - considered a regular file and the contents of ```` will be - written at ````. -* If ```` doesn't exist, it is created along with all missing - directories in its path. - -.. _dockerfile_entrypoint: - -``ENTRYPOINT`` -============== - -ENTRYPOINT has two forms: - -* ``ENTRYPOINT ["executable", "param1", "param2"]`` (like an *exec*, - preferred form) -* ``ENTRYPOINT command param1 param2`` (as a *shell*) - -There can only be one ``ENTRYPOINT`` in a Dockerfile. If you have more -than one ``ENTRYPOINT``, then only the last one in the Dockerfile will -have an effect. - -An ``ENTRYPOINT`` helps you to configure a container that you can run -as an executable. That is, when you specify an ``ENTRYPOINT``, then -the whole container runs as if it was just that executable. - -The ``ENTRYPOINT`` instruction adds an entry command that will **not** -be overwritten when arguments are passed to ``docker run``, unlike the -behavior of ``CMD``. This allows arguments to be passed to the -entrypoint. i.e. ``docker run -d`` will pass the "-d" -argument to the ENTRYPOINT. - -You can specify parameters either in the ENTRYPOINT JSON array (as in -"like an exec" above), or by using a CMD statement. Parameters in the -ENTRYPOINT will not be overridden by the ``docker run`` arguments, but -parameters specified via CMD will be overridden by ``docker run`` -arguments. - -Like a ``CMD``, you can specify a plain string for the ENTRYPOINT and -it will execute in ``/bin/sh -c``: - -.. code-block:: bash - - FROM ubuntu - ENTRYPOINT wc -l - - -For example, that Dockerfile's image will *always* take stdin as input -("-") and print the number of lines ("-l"). If you wanted to make -this optional but default, you could use a CMD: - -.. code-block:: bash - - FROM ubuntu - CMD ["-l", "-"] - ENTRYPOINT ["/usr/bin/wc"] - -.. _dockerfile_volume: - -``VOLUME`` -========== - - ``VOLUME ["/data"]`` - -The ``VOLUME`` instruction will create a mount point with the specified name and mark it -as holding externally mounted volumes from native host or other containers. For more information/examples -and mounting instructions via docker client, refer to :ref:`volume_def` documentation. - -.. _dockerfile_user: - -``USER`` -======== - - ``USER daemon`` - -The ``USER`` instruction sets the username or UID to use when running -the image. - -.. _dockerfile_workdir: - -``WORKDIR`` -=========== - - ``WORKDIR /path/to/workdir`` - -The ``WORKDIR`` instruction sets the working directory for the ``RUN``, ``CMD`` and -``ENTRYPOINT`` Dockerfile commands that follow it. - -It can be used multiple times in the one Dockerfile. If a relative path is -provided, it will be relative to the path of the previous ``WORKDIR`` -instruction. For example: - - WORKDIR /a - WORKDIR b - WORKDIR c - RUN pwd - -The output of the final ``pwd`` command in this Dockerfile would be ``/a/b/c``. - -``ONBUILD`` -=========== - - ``ONBUILD [INSTRUCTION]`` - -The ``ONBUILD`` instruction adds to the image a "trigger" instruction to be -executed at a later time, when the image is used as the base for another build. -The trigger will be executed in the context of the downstream build, as if it -had been inserted immediately after the *FROM* instruction in the downstream -Dockerfile. - -Any build instruction can be registered as a trigger. 
- -This is useful if you are building an image which will be used as a base to build -other images, for example an application build environment or a daemon which may be -customized with user-specific configuration. - -For example, if your image is a reusable python application builder, it will require -application source code to be added in a particular directory, and it might require -a build script to be called *after* that. You can't just call *ADD* and *RUN* now, -because you don't yet have access to the application source code, and it will be -different for each application build. You could simply provide application developers -with a boilerplate Dockerfile to copy-paste into their application, but that is -inefficient, error-prone and difficult to update because it mixes with -application-specific code. - -The solution is to use *ONBUILD* to register in advance instructions to run later, -during the next build stage. - -Here's how it works: - -1. When it encounters an *ONBUILD* instruction, the builder adds a trigger to - the metadata of the image being built. - The instruction does not otherwise affect the current build. - -2. At the end of the build, a list of all triggers is stored in the image manifest, - under the key *OnBuild*. They can be inspected with *docker inspect*. - -3. Later the image may be used as a base for a new build, using the *FROM* instruction. - As part of processing the *FROM* instruction, the downstream builder looks for *ONBUILD* - triggers, and executes them in the same order they were registered. If any of the - triggers fail, the *FROM* instruction is aborted which in turn causes the build - to fail. If all triggers succeed, the FROM instruction completes and the build - continues as usual. - -4. Triggers are cleared from the final image after being executed. In other words - they are not inherited by "grand-children" builds. - -For example you might add something like this: - -.. code-block:: bash - - [...] - ONBUILD ADD . /app/src - ONBUILD RUN /usr/local/bin/python-build --dir /app/src - [...] - -.. warning:: Chaining ONBUILD instructions using `ONBUILD ONBUILD` isn't allowed. -.. warning:: ONBUILD may not trigger FROM or MAINTAINER instructions. - -.. _dockerfile_examples: - -Dockerfile Examples -====================== - -.. code-block:: bash - - # Nginx - # - # VERSION 0.0.1 - - FROM ubuntu - MAINTAINER Guillaume J. Charmes - - # make sure the package repository is up to date - RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list - RUN apt-get update - - RUN apt-get install -y inotify-tools nginx apache2 openssh-server - -.. code-block:: bash - - # Firefox over VNC - # - # VERSION 0.3 - - FROM ubuntu - # make sure the package repository is up to date - RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list - RUN apt-get update - - # Install vnc, xvfb in order to create a 'fake' display and firefox - RUN apt-get install -y x11vnc xvfb firefox - RUN mkdir /.vnc - # Setup a password - RUN x11vnc -storepasswd 1234 ~/.vnc/passwd - # Autostart firefox (might not be the best way, but it does the trick) - RUN bash -c 'echo "firefox" >> /.bashrc' - - EXPOSE 5900 - CMD ["x11vnc", "-forever", "-usepw", "-create"] - -.. 
code-block:: bash - - # Multiple images example - # - # VERSION 0.1 - - FROM ubuntu - RUN echo foo > bar - # Will output something like ===> 907ad6c2736f - - FROM ubuntu - RUN echo moo > oink - # Will output something like ===> 695d7793cbe4 - - # You'll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with - # /oink. diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst deleted file mode 100644 index 9c1d3ae4be..0000000000 --- a/docs/sources/reference/commandline/cli.rst +++ /dev/null @@ -1,1405 +0,0 @@ -:title: Command Line Interface -:description: Docker's CLI command description and usage -:keywords: Docker, Docker documentation, CLI, command line - -.. _cli: - -Command Line -============ - -To list available commands, either run ``docker`` with no parameters or execute -``docker help``:: - - $ sudo docker - Usage: docker [OPTIONS] COMMAND [arg...] - -H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. - - A self-sufficient runtime for linux containers. - - ... - -.. _cli_options: - -Option types ------------- - -Single character commandline options can be combined, so rather than typing -``docker run -t -i --name test busybox sh``, you can write -``docker run -ti --name test busybox sh``. - -Boolean -~~~~~~~ - -Boolean options look like ``-d=false``. The value you see is the -default value which gets set if you do **not** use the boolean -flag. If you do call ``run -d``, that sets the opposite boolean value, -so in this case, ``true``, and so ``docker run -d`` **will** run in -"detached" mode, in the background. Other boolean options are similar --- specifying them will set the value to the opposite of the default -value. - -Multi -~~~~~ - -Options like ``-a=[]`` indicate they can be specified multiple times:: - - docker run -a stdin -a stdout -a stderr -i -t ubuntu /bin/bash - -Sometimes this can use a more complex value string, as for ``-v``:: - - docker run -v /host:/container example/mysql - -Strings and Integers -~~~~~~~~~~~~~~~~~~~~ - -Options like ``--name=""`` expect a string, and they can only be -specified once. Options like ``-c=0`` expect an integer, and they can -only be specified once. - -.. _cli_daemon: - -``daemon`` ----------- - -:: - - Usage of docker: - -D, --debug=false: Enable debug mode - -G, --group="docker": Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group - -H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. 
- --api-enable-cors=false: Enable CORS headers in the remote API - -b, --bridge="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking - -bip="": Use this CIDR notation address for the network bridge's IP, not compatible with -b - -d, --daemon=false: Enable daemon mode - --dns=[]: Force docker to use specific DNS servers - --dns-search=[]: Force Docker to use specific DNS search domains - -g, --graph="/var/lib/docker": Path to use as the root of the docker runtime - --icc=true: Enable inter-container communication - --ip="0.0.0.0": Default IP address to use when binding container ports - --ip-forward=true: Enable net.ipv4.ip_forward - --iptables=true: Enable Docker's addition of iptables rules - -p, --pidfile="/var/run/docker.pid": Path to use for daemon PID file - -r, --restart=true: Restart previously running containers - -s, --storage-driver="": Force the docker runtime to use a specific storage driver - -e, --exec-driver="native": Force the docker runtime to use a specific exec driver - -v, --version=false: Print version information and quit - --tls=false: Use TLS; implied by tls-verify flags - --tlscacert="~/.docker/ca.pem": Trust only remotes providing a certificate signed by the CA given here - --tlscert="~/.docker/cert.pem": Path to TLS certificate file - --tlskey="~/.docker/key.pem": Path to TLS key file - --tlsverify=false: Use TLS and verify the remote (daemon: verify client, client: verify daemon) - --mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available - - Options with [] may be specified multiple times. - -The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the -daemon and client. To run the daemon you provide the ``-d`` flag. - -To force Docker to use devicemapper as the storage driver, use ``docker -d -s devicemapper``. - -To set the DNS server for all Docker containers, use ``docker -d --dns 8.8.8.8``. - -To set the DNS search domain for all Docker containers, use ``docker -d --dns-search example.com``. - -To run the daemon with debug output, use ``docker -d -D``. - -To use lxc as the execution driver, use ``docker -d -e lxc``. - -The docker client will also honor the ``DOCKER_HOST`` environment variable to set -the ``-H`` flag for the client. - -:: - - docker -H tcp://0.0.0.0:4243 ps - # or - export DOCKER_HOST="tcp://0.0.0.0:4243" - docker ps - # both are equal - -To run the daemon with `systemd socket activation `_, use ``docker -d -H fd://``. -Using ``fd://`` will work perfectly for most setups but you can also specify individual sockets too ``docker -d -H fd://3``. -If the specified socket activated files aren't found then docker will exit. -You can find examples of using systemd socket activation with docker and systemd in the `docker source tree `_. - -Docker supports softlinks for the Docker data directory (``/var/lib/docker``) and for ``/tmp``. -TMPDIR and the data directory can be set like this: - -:: - - TMPDIR=/mnt/disk2/tmp /usr/local/bin/docker -d -D -g /var/lib/docker -H unix:// > /var/lib/boot2docker/docker.log 2>&1 - # or - export TMPDIR=/mnt/disk2/tmp - /usr/local/bin/docker -d -D -g /var/lib/docker -H unix:// > /var/lib/boot2docker/docker.log 2>&1 - -.. _cli_attach: - -``attach`` ----------- - -:: - - Usage: docker attach CONTAINER - - Attach to a running container. 
- - --no-stdin=false: Do not attach stdin - --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) - -The ``attach`` command will allow you to view or interact with any -running container, detached (``-d``) or interactive (``-i``). You can -attach to the same container at the same time - screen sharing style, -or quickly view the progress of your daemonized process. - -You can detach from the container again (and leave it running) with -``CTRL-C`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of -the Docker client when it quits. When you detach from the container's -process the exit code will be returned to the client. - -To stop a container, use ``docker stop``. - -To kill the container, use ``docker kill``. - -.. _cli_attach_examples: - -Examples: -~~~~~~~~~ - -.. code-block:: bash - - $ ID=$(sudo docker run -d ubuntu /usr/bin/top -b) - $ sudo docker attach $ID - top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st - Mem: 373572k total, 355560k used, 18012k free, 27872k buffers - Swap: 786428k total, 0k used, 786428k free, 221740k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top - - top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st - Mem: 373572k total, 355244k used, 18328k free, 27872k buffers - Swap: 786428k total, 0k used, 786428k free, 221776k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top - - - top - 02:05:58 up 3:06, 0 users, load average: 0.01, 0.02, 0.05 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - Cpu(s): 0.2%us, 0.3%sy, 0.0%ni, 99.5%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st - Mem: 373572k total, 355780k used, 17792k free, 27880k buffers - Swap: 786428k total, 0k used, 786428k free, 221776k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top - ^C$ - $ sudo docker stop $ID - -.. _cli_build: - -``build`` ---------- - -:: - - Usage: docker build [OPTIONS] PATH | URL | - - Build a new container image from the source code at PATH - -t, --tag="": Repository name (and optionally a tag) to be applied - to the resulting image in case of success. - -q, --quiet=false: Suppress the verbose output generated by the containers. - --no-cache: Do not use the cache when building the image. - --rm=true: Remove intermediate containers after a successful build - -Use this command to build Docker images from a ``Dockerfile`` and a "context". - -The files at ``PATH`` or ``URL`` are called the "context" of the build. -The build process may refer to any of the files in the context, for example when -using an :ref:`ADD ` instruction. -When a single ``Dockerfile`` is given as ``URL``, then no context is set. - -When a Git repository is set as ``URL``, then the repository is used as the context. -The Git repository is cloned with its submodules (`git clone --recursive`). -A fresh git clone occurs in a temporary directory on your local host, and then this -is sent to the Docker daemon as the context. -This way, your local user credentials and vpn's etc can be used to access private repositories - -.. _cli_build_examples: - -.. seealso:: :ref:`dockerbuilder`. 
- -Examples: -~~~~~~~~~ - -.. code-block:: bash - - $ sudo docker build . - Uploading context 10240 bytes - Step 1 : FROM busybox - Pulling repository busybox - ---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/ - Step 2 : RUN ls -lh / - ---> Running in 9c9e81692ae9 - total 24 - drwxr-xr-x 2 root root 4.0K Mar 12 2013 bin - drwxr-xr-x 5 root root 4.0K Oct 19 00:19 dev - drwxr-xr-x 2 root root 4.0K Oct 19 00:19 etc - drwxr-xr-x 2 root root 4.0K Nov 15 23:34 lib - lrwxrwxrwx 1 root root 3 Mar 12 2013 lib64 -> lib - dr-xr-xr-x 116 root root 0 Nov 15 23:34 proc - lrwxrwxrwx 1 root root 3 Mar 12 2013 sbin -> bin - dr-xr-xr-x 13 root root 0 Nov 15 23:34 sys - drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp - drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr - ---> b35f4035db3f - Step 3 : CMD echo Hello World - ---> Running in 02071fceb21b - ---> f52f38b7823e - Successfully built f52f38b7823e - Removing intermediate container 9c9e81692ae9 - Removing intermediate container 02071fceb21b - - -This example specifies that the ``PATH`` is ``.``, and so all the files in -the local directory get tar'd and sent to the Docker daemon. The ``PATH`` -specifies where to find the files for the "context" of the build on -the Docker daemon. Remember that the daemon could be running on a -remote machine and that no parsing of the ``Dockerfile`` happens at the -client side (where you're running ``docker build``). That means that -*all* the files at ``PATH`` get sent, not just the ones listed to -:ref:`ADD ` in the ``Dockerfile``. - -The transfer of context from the local machine to the Docker daemon is -what the ``docker`` client means when you see the "Uploading context" -message. - -If you wish to keep the intermediate containers after the build is complete, -you must use ``--rm=false``. This does not affect the build cache. - - -.. code-block:: bash - - $ sudo docker build -t vieux/apache:2.0 . - -This will build like the previous example, but it will then tag the -resulting image. The repository name will be ``vieux/apache`` and the -tag will be ``2.0`` - - -.. code-block:: bash - - $ sudo docker build - < Dockerfile - -This will read a ``Dockerfile`` from *stdin* without context. Due to -the lack of a context, no contents of any local directory will be sent -to the ``docker`` daemon. Since there is no context, a ``Dockerfile`` -``ADD`` only works if it refers to a remote URL. - -.. code-block:: bash - - $ sudo docker build github.com/creack/docker-firefox - -This will clone the GitHub repository and use the cloned repository as -context. The ``Dockerfile`` at the root of the repository is used as -``Dockerfile``. Note that you can specify an arbitrary Git repository -by using the ``git://`` schema. - - -.. _cli_commit: - -``commit`` ----------- - -:: - - Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] - - Create a new image from a container's changes - - -m, --message="": Commit message - -a, --author="": Author (eg. "John Hannibal Smith " - -It can be useful to commit a container's file changes or settings into a new image. -This allows you debug a container by running an interactive shell, or to export -a working dataset to another server. -Generally, it is better to use Dockerfiles to manage your images in a documented -and maintainable way. - -.. _cli_commit_examples: - -Commit an existing container -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
code-block:: bash - - $ sudo docker ps - ID IMAGE COMMAND CREATED STATUS PORTS - c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours - 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours - $ docker commit c3f279d17e0a SvenDowideit/testimage:version3 - f5283438590d - $ docker images | head - REPOSITORY TAG ID CREATED VIRTUAL SIZE - SvenDowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB - - -.. _cli_cp: - -``cp`` ------- - -:: - - Usage: docker cp CONTAINER:PATH HOSTPATH - - Copy files/folders from the containers filesystem to the host - path. Paths are relative to the root of the filesystem. - -.. code-block:: bash - - $ sudo docker cp 7bb0e258aefe:/etc/debian_version . - $ sudo docker cp blue_frog:/etc/hosts . - -.. _cli_diff: - -``diff`` --------- - -:: - - Usage: docker diff CONTAINER - - List the changed files and directories in a container's filesystem - -There are 3 events that are listed in the 'diff': - -1. ```A``` - Add -2. ```D``` - Delete -3. ```C``` - Change - -For example: - -.. code-block:: bash - - $ sudo docker diff 7bb0e258aefe - - C /dev - A /dev/kmsg - C /etc - A /etc/mtab - A /go - A /go/src - A /go/src/github.com - A /go/src/github.com/dotcloud - A /go/src/github.com/dotcloud/docker - A /go/src/github.com/dotcloud/docker/.git - .... - -.. _cli_events: - -``events`` ----------- - -:: - - Usage: docker events - - Get real time events from the server - - --since="": Show all events created since timestamp - (either seconds since epoch, or date string as below) - --until="": Show events created before timestamp - (either seconds since epoch, or date string as below) - -.. _cli_events_example: - -Examples -~~~~~~~~ - -You'll need two shells for this example. - -Shell 1: Listening for events -............................. - -.. code-block:: bash - - $ sudo docker events - -Shell 2: Start and Stop a Container -................................... - -.. code-block:: bash - - $ sudo docker start 4386fb97867d - $ sudo docker stop 4386fb97867d - -Shell 1: (Again .. now showing events) -...................................... - -.. code-block:: bash - - [2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop - -Show events in the past from a specified time -............................................. - -.. code-block:: bash - - $ sudo docker events --since 1378216169 - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop - - $ sudo docker events --since '2013-09-03' - [2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop - - $ sudo docker events --since '2013-09-03 15:49:29 +0200 CEST' - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die - [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop - -.. _cli_export: - -``export`` ----------- - -:: - - Usage: docker export CONTAINER - - Export the contents of a filesystem as a tar archive to STDOUT - -For example: - -.. code-block:: bash - - $ sudo docker export red_panda > latest.tar - -.. 
_cli_history: - -``history`` ------------ - -:: - - Usage: docker history [OPTIONS] IMAGE - - Show the history of an image - - --no-trunc=false: Don't truncate output - -q, --quiet=false: Only show numeric IDs - -To see how the ``docker:latest`` image was built: - -.. code-block:: bash - - $ docker history docker - IMAGE CREATED CREATED BY SIZE - 3e23a5875458790b7a806f95f7ec0d0b2a5c1659bfc899c89f939f6d5b8f7094 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B - 8578938dd17054dce7993d21de79e96a037400e8d28e15e7290fea4f65128a36 8 days ago /bin/sh -c dpkg-reconfigure locales && locale-gen C.UTF-8 && /usr/sbin/update-locale LANG=C.UTF-8 1.245 MB - be51b77efb42f67a5e96437b3e102f81e0a1399038f77bf28cea0ed23a65cf60 8 days ago /bin/sh -c apt-get update && apt-get install -y git libxml2-dev python build-essential make gcc python-dev locales python-pip 338.3 MB - 4b137612be55ca69776c7f30c2d2dd0aa2e7d72059820abf3e25b629f887a084 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB - 750d58736b4b6cc0f9a9abe8f258cef269e3e9dceced1146503522be9f985ada 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -t jessie.tar.xz jessie http://http.debian.net/debian 0 B - 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 9 months ago 0 B - -.. _cli_images: - -``images`` ----------- - -:: - - Usage: docker images [OPTIONS] [NAME] - - List images - - -a, --all=false: Show all images (by default filter out the intermediate image layers) - --no-trunc=false: Don't truncate output - -q, --quiet=false: Only show numeric IDs - -The default ``docker images`` will show all top level images, their repository -and tags, and their virtual size. - -Docker images have intermediate layers that increase reuseability, decrease -disk usage, and speed up ``docker build`` by allowing each step to be cached. -These intermediate layers are not shown by default. - -Listing the most recently created images -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: bash - - $ sudo docker images | head - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE - 77af4d6b9913 19 hours ago 1.089 GB - committest latest b6fa739cedf5 19 hours ago 1.089 GB - 78a85c484f71 19 hours ago 1.089 GB - docker latest 30557a29d5ab 20 hours ago 1.089 GB - 0124422dd9f9 20 hours ago 1.089 GB - 18ad6fad3402 22 hours ago 1.082 GB - f9f1e26352f0 23 hours ago 1.089 GB - tryout latest 2629d1fa0b81 23 hours ago 131.5 MB - 5ed6274db6ce 24 hours ago 1.089 GB - -Listing the full length image IDs -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: bash - - $ sudo docker images --no-trunc | head - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE - 77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB - committest latest b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB - 78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB - docker latest 30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB - 0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB - 18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB - f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB - tryout latest 2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB - 5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB - -.. 
_cli_import: - -``import`` ----------- - -:: - - Usage: docker import URL|- [REPOSITORY[:TAG]] - - Create an empty filesystem image and import the contents of the tarball - (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. - -URLs must start with ``http`` and point to a single -file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) containing a -root filesystem. If you would like to import from a local directory or -archive, you can use the ``-`` parameter to take the data from *stdin*. - -Examples -~~~~~~~~ - -Import from a remote location -............................. - -This will create a new untagged image. - -.. code-block:: bash - - $ sudo docker import http://example.com/exampleimage.tgz - -Import from a local file -........................ - -Import to docker via pipe and *stdin*. - -.. code-block:: bash - - $ cat exampleimage.tgz | sudo docker import - exampleimagelocal:new - -Import from a local directory -............................. - -.. code-block:: bash - - $ sudo tar -c . | docker import - exampleimagedir - -Note the ``sudo`` in this example -- you must preserve the ownership of the -files (especially root ownership) during the archiving with tar. If you are not -root (or the sudo command) when you tar, then the ownerships might not get -preserved. - -.. _cli_info: - -``info`` --------- - -:: - - Usage: docker info - - Display system-wide information. - -.. code-block:: bash - - $ sudo docker info - Containers: 292 - Images: 194 - Debug mode (server): false - Debug mode (client): false - Fds: 22 - Goroutines: 67 - LXC Version: 0.9.0 - EventsListeners: 115 - Kernel Version: 3.8.0-33-generic - WARNING: No swap limit support - -When sending issue reports, please use ``docker version`` and ``docker info`` to -ensure we know how your setup is configured. - -.. _cli_inspect: - -``inspect`` ------------ - -:: - - Usage: docker inspect CONTAINER|IMAGE [CONTAINER|IMAGE...] - - Return low-level information on a container/image - - -f, --format="": Format the output using the given go template. - -By default, this will render all results in a JSON array. If a format -is specified, the given template will be executed for each result. - -Go's `text/template `_ package -describes all the details of the format. - -Examples -~~~~~~~~ - -Get an instance's IP Address -............................ - -For the most part, you can pick out any field from the JSON in a -fairly straightforward manner. - -.. code-block:: bash - - $ sudo docker inspect --format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID - -List All Port Bindings -...................... - -One can loop over arrays and maps in the results to produce simple -text output: - -.. code-block:: bash - - $ sudo docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID - -Find a Specific Port Mapping -............................ - -The ``.Field`` syntax doesn't work when the field name begins with a -number, but the template language's ``index`` function does. The -``.NetworkSettings.Ports`` section contains a map of the internal port -mappings to a list of external address/port objects, so to grab just -the numeric public port, you use ``index`` to find the specific port -map, and then ``index`` 0 contains first object inside of that. Then -we ask for the ``HostPort`` field to get the public address. - -.. 
code-block:: bash - - $ sudo docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID - -Get config -.......... - -The ``.Field`` syntax doesn't work when the field contains JSON data, -but the template language's custom ``json`` function does. The ``.config`` -section contains complex json object, so to grab it as JSON, you use ``json`` -to convert config object into JSON - -.. code-block:: bash - - $ sudo docker inspect --format='{{json .config}}' $INSTANCE_ID - - -.. _cli_kill: - -``kill`` --------- - -:: - - Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] - - Kill a running container (send SIGKILL, or specified signal) - - -s, --signal="KILL": Signal to send to the container - -The main process inside the container will be sent SIGKILL, or any signal specified with option ``--signal``. - -Known Issues (kill) -~~~~~~~~~~~~~~~~~~~ - -* :issue:`197` indicates that ``docker kill`` may leave directories - behind and make it difficult to remove the container. -* :issue:`3844` lxc 1.0.0 beta3 removed ``lcx-kill`` which is used by Docker versions before 0.8.0; - see the issue for a workaround. - -.. _cli_load: - -``load`` --------- - -:: - - Usage: docker load - - Load an image from a tar archive on STDIN - - -i, --input="": Read from a tar archive file, instead of STDIN - -Loads a tarred repository from a file or the standard input stream. -Restores both images and tags. - -.. code-block:: bash - - $ sudo docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE - $ sudo docker load < busybox.tar - $ sudo docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE - busybox latest 769b9341d937 7 weeks ago 2.489 MB - $ sudo docker load --input fedora.tar - $ sudo docker images - REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE - busybox latest 769b9341d937 7 weeks ago 2.489 MB - fedora rawhide 0d20aec6529d 7 weeks ago 387 MB - fedora 20 58394af37342 7 weeks ago 385.5 MB - fedora heisenbug 58394af37342 7 weeks ago 385.5 MB - fedora latest 58394af37342 7 weeks ago 385.5 MB - - -.. _cli_login: - -``login`` ---------- - -:: - - Usage: docker login [OPTIONS] [SERVER] - - Register or Login to the docker registry server - - -e, --email="": Email - -p, --password="": Password - -u, --username="": Username - - If you want to login to a private registry you can - specify this by adding the server name. - - example: - docker login localhost:8080 - - -.. _cli_logs: - -``logs`` --------- - -:: - - Usage: docker logs [OPTIONS] CONTAINER - - Fetch the logs of a container - - -f, --follow=false: Follow log output - -The ``docker logs`` command batch-retrieves all logs present at the time of execution. - -The ``docker logs --follow`` command combines ``docker logs`` and ``docker attach``: -it will first return all logs from the beginning and then continue streaming -new output from the container's stdout and stderr. - - -.. _cli_port: - -``port`` --------- - -:: - - Usage: docker port [OPTIONS] CONTAINER PRIVATE_PORT - - Lookup the public-facing port which is NAT-ed to PRIVATE_PORT - - -.. _cli_ps: - -``ps`` ------- - -:: - - Usage: docker ps [OPTIONS] - - List containers - - -a, --all=false: Show all containers. Only running containers are shown by default. - --before="": Show only container created before Id or Name, include non-running ones. - -l, --latest=false: Show only the latest created container, include non-running ones. - -n=-1: Show n last created containers, include non-running ones. 
- --no-trunc=false: Don't truncate output - -q, --quiet=false: Only display numeric IDs - -s, --size=false: Display sizes, not to be used with -q - --since="": Show only containers created since Id or Name, include non-running ones. - - -Running ``docker ps`` showing 2 linked containers. - -.. code-block:: bash - - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp - d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db - -``docker ps`` will show only running containers by default. To see all containers: ``docker ps -a`` - -.. _cli_pull: - -``pull`` --------- - -:: - - Usage: docker pull NAME[:TAG] - - Pull an image or a repository from the registry - -Most of your images will be created on top of a base image from the -(https://index.docker.io). - -The Docker Index contains many pre-built images that you can ``pull`` and try -without needing to define and configure your own. - -To download a particular image, or set of images (i.e., a repository), -use ``docker pull``: - -.. code-block:: bash - - $ docker pull debian - # will pull all the images in the debian repository - $ docker pull debian:testing - # will pull only the image named debian:testing and any intermediate layers - # it is based on. (typically the empty `scratch` image, a MAINTAINERs layer, - # and the un-tared base. - -.. _cli_push: - -``push`` --------- - -:: - - Usage: docker push NAME[:TAG] - - Push an image or a repository to the registry - -Use ``docker push`` to share your images on public or private registries. - -.. _cli_restart: - -``restart`` ------------ - -:: - - Usage: docker restart [OPTIONS] NAME - - Restart a running container - - -t, --time=10: Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10 - -.. _cli_rm: - -``rm`` ------- - -:: - - Usage: docker rm [OPTIONS] CONTAINER - - Remove one or more containers - -l, --link="": Remove the link instead of the actual container - -f, --force=false: Force removal of running container - -v, --volumes=false: Remove the volumes associated to the container - -Known Issues (rm) -~~~~~~~~~~~~~~~~~ - -* :issue:`197` indicates that ``docker kill`` may leave directories - behind and make it difficult to remove the container. - - -Examples: -~~~~~~~~~ - -.. code-block:: bash - - $ sudo docker rm /redis - /redis - - -This will remove the container referenced under the link ``/redis``. - - -.. code-block:: bash - - $ sudo docker rm --link /webapp/redis - /webapp/redis - - -This will remove the underlying link between ``/webapp`` and the ``/redis`` containers removing all -network communication. - -.. code-block:: bash - - $ sudo docker rm $(docker ps -a -q) - - -This command will delete all stopped containers. The command ``docker ps -a -q`` will return all -existing container IDs and pass them to the ``rm`` command which will delete them. Any running -containers will not be deleted. - -.. _cli_rmi: - -``rmi`` -------- - -:: - - Usage: docker rmi IMAGE [IMAGE...] - - Remove one or more images - - -f, --force=false: Force - --no-prune=false: Do not delete untagged parents - -Removing tagged images -~~~~~~~~~~~~~~~~~~~~~~ - -Images can be removed either by their short or long ID's, or their image names. -If an image has more than one name, each of them needs to be removed before the -image is removed. - -.. 
code-block:: bash - - $ sudo docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - - $ sudo docker rmi fd484f19954f - Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories - 2013/12/11 05:47:16 Error: failed to remove one or more images - - $ sudo docker rmi test1 - Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 - $ sudo docker rmi test2 - Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 - - $ sudo docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - $ sudo docker rmi test - Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 - Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 - - -.. _cli_run: - -``run`` -------- - -:: - - Usage: docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] - - Run a command in a new container - - -a, --attach=map[]: Attach to stdin, stdout or stderr - -c, --cpu-shares=0: CPU shares (relative weight) - --cidfile="": Write the container ID to the file - -d, --detach=false: Detached mode: Run container in the background, print new container id - -e, --env=[]: Set environment variables - --env-file="": Read in a line delimited file of ENV variables - -h, --hostname="": Container host name - -i, --interactive=false: Keep stdin open even if not attached - --privileged=false: Give extended privileges to this container - -m, --memory="": Memory limit (format: , where unit = b, k, m or g) - -n, --networking=true: Enable networking for this container - -p, --publish=[]: Map a network port to the container - --rm=false: Automatically remove the container when it exits (incompatible with -d) - -t, --tty=false: Allocate a pseudo-tty - -u, --user="": Username or UID - --dns=[]: Set custom dns servers for the container - --dns-search=[]: Set custom DNS search domains for the container - -v, --volume=[]: Create a bind mount to a directory or file with: [host-path]:[container-path]:[rw|ro]. If a directory "container-path" is missing, then docker creates a new volume. - --volumes-from="": Mount all volumes from the given container(s) - --entrypoint="": Overwrite the default entrypoint set by the image - -w, --workdir="": Working directory inside the container - --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" - --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) - --expose=[]: Expose a port from the container without publishing it to your host - --link="": Add link to another container (name:alias) - --name="": Assign the specified name to the container. If no name is specific docker will generate a random name - -P, --publish-all=false: Publish all exposed ports to the host interfaces - -The ``docker run`` command first ``creates`` a writeable container layer over -the specified image, and then ``starts`` it using the specified command. That -is, ``docker run`` is equivalent to the API ``/containers/create`` then -``/containers/(id)/start``. -A stopped container can be restarted with all its previous changes intact using -``docker start``. See ``docker ps -a`` to view a list of all containers. 
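For illustration, the sketch below walks through that lifecycle, assuming an ``ubuntu`` image has already been pulled and using a throwaway container name:

.. code-block:: bash

    # Create and start a container; it stops once the command exits
    $ sudo docker run --name lifecycle-test ubuntu touch /created-by-run

    # The stopped container still shows up when listing with -a
    $ sudo docker ps -a

    # Restart it; the filesystem change made by the first run is kept
    $ sudo docker start lifecycle-test
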
- -The ``docker run`` command can be used in combination with ``docker commit`` to -:ref:`change the command that a container runs `. - -See :ref:`port_redirection` for more detailed information about the ``--expose``, -``-p``, ``-P`` and ``--link`` parameters, and :ref:`working_with_links_names` for -specific examples using ``--link``. - -Known Issues (run --volumes-from) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* :issue:`2702`: "lxc-start: Permission denied - failed to mount" - could indicate a permissions problem with AppArmor. Please see the - issue for a workaround. - -Examples: -~~~~~~~~~ - -.. code-block:: bash - - $ sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" - -This will create a container and print ``test`` to the console. The -``cidfile`` flag makes Docker attempt to create a new file and write the -container ID to it. If the file exists already, Docker will return an -error. Docker will close this file when ``docker run`` exits. - -.. code-block:: bash - - $ sudo docker run -t -i --rm ubuntu bash - root@bc338942ef20:/# mount -t tmpfs none /mnt - mount: permission denied - - -This will *not* work, because by default, most potentially dangerous -kernel capabilities are dropped; including ``cap_sys_admin`` (which is -required to mount filesystems). However, the ``--privileged`` flag will -allow it to run: - -.. code-block:: bash - - $ sudo docker run --privileged ubuntu bash - root@50e3f57e16e6:/# mount -t tmpfs none /mnt - root@50e3f57e16e6:/# df -h - Filesystem Size Used Avail Use% Mounted on - none 1.9G 0 1.9G 0% /mnt - - -The ``--privileged`` flag gives *all* capabilities to the container, -and it also lifts all the limitations enforced by the ``device`` -cgroup controller. In other words, the container can then do almost -everything that the host can do. This flag exists to allow special -use-cases, like running Docker within Docker. - -.. code-block:: bash - - $ sudo docker run -w /path/to/dir/ -i -t ubuntu pwd - -The ``-w`` lets the command being executed inside directory given, -here ``/path/to/dir/``. If the path does not exists it is created inside the -container. - -.. code-block:: bash - - $ sudo docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd - -The ``-v`` flag mounts the current working directory into the container. -The ``-w`` lets the command being executed inside the current -working directory, by changing into the directory to the value -returned by ``pwd``. So this combination executes the command -using the container, but inside the current working directory. - -.. code-block:: bash - - $ sudo docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash - -When the host directory of a bind-mounted volume doesn't exist, Docker -will automatically create this directory on the host for you. In the -example above, Docker will create the ``/doesnt/exist`` folder before -starting your container. - -.. code-block:: bash - - $ sudo docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v ./static-docker:/usr/bin/docker busybox sh - -By bind-mounting the docker unix socket and statically linked docker binary -(such as that provided by https://get.docker.io), you give the container -the full access to create and manipulate the host's docker daemon. - -.. code-block:: bash - - $ sudo docker run -p 127.0.0.1:80:8080 ubuntu bash - -This binds port ``8080`` of the container to port ``80`` on ``127.0.0.1`` of the -host machine. :ref:`port_redirection` explains in detail how to manipulate ports -in Docker. - -.. 
code-block:: bash - - $ sudo docker run --expose 80 ubuntu bash - -This exposes port ``80`` of the container for use within a link without -publishing the port to the host system's interfaces. :ref:`port_redirection` -explains in detail how to manipulate ports in Docker. - -.. code-block:: bash - - $ sudo docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash - -This sets environmental variables in the container. For illustration all three -flags are shown here. Where ``-e``, ``--env`` take an environment variable and -value, or if no "=" is provided, then that variable's current value is passed -through (i.e. $MYVAR1 from the host is set to $MYVAR1 in the container). All -three flags, ``-e``, ``--env`` and ``--env-file`` can be repeated. - -Regardless of the order of these three flags, the ``--env-file`` are processed -first, and then ``-e``/``--env`` flags. This way, the ``-e`` or ``--env`` will -override variables as needed. - -.. code-block:: bash - - $ cat ./env.list - TEST_FOO=BAR - $ sudo docker run --env TEST_FOO="This is a test" --env-file ./env.list busybox env | grep TEST_FOO - TEST_FOO=This is a test - -The ``--env-file`` flag takes a filename as an argument and expects each line -to be in the VAR=VAL format, mimicking the argument passed to ``--env``. -Comment lines need only be prefixed with ``#`` - -An example of a file passed with ``--env-file`` - -.. code-block:: bash - - $ cat ./env.list - TEST_FOO=BAR - - # this is a comment - TEST_APP_DEST_HOST=10.10.0.127 - TEST_APP_DEST_PORT=8888 - - # pass through this variable from the caller - TEST_PASSTHROUGH - $ sudo TEST_PASSTHROUGH=howdy docker run --env-file ./env.list busybox env - HOME=/ - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - HOSTNAME=5198e0745561 - TEST_FOO=BAR - TEST_APP_DEST_HOST=10.10.0.127 - TEST_APP_DEST_PORT=8888 - TEST_PASSTHROUGH=howdy - - -.. code-block:: bash - - $ sudo docker run --name console -t -i ubuntu bash - -This will create and run a new container with the container name -being ``console``. - -.. code-block:: bash - - $ sudo docker run --link /redis:redis --name console ubuntu bash - -The ``--link`` flag will link the container named ``/redis`` into the -newly created container with the alias ``redis``. The new container -can access the network and environment of the redis container via -environment variables. The ``--name`` flag will assign the name ``console`` -to the newly created container. - -.. code-block:: bash - - $ sudo docker run --volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd - -The ``--volumes-from`` flag mounts all the defined volumes from the -referenced containers. Containers can be specified by a comma separated -list or by repetitions of the ``--volumes-from`` argument. The container -ID may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in -read-only or read-write mode, respectively. By default, the volumes are mounted -in the same mode (read write or read only) as the reference container. - -The ``-a`` flag tells ``docker run`` to bind to the container's stdin, stdout -or stderr. This makes it possible to manipulate the output and input as needed. - -.. code-block:: bash - - $ sudo echo "test" | docker run -i -a stdin ubuntu cat - - -This pipes data into a container and prints the container's ID by attaching -only to the container's stdin. - -.. 
code-block:: bash - - $ sudo docker run -a stderr ubuntu echo test - -This isn't going to print anything unless there's an error because we've only -attached to the stderr of the container. The container's logs still store -what's been written to stderr and stdout. - -.. code-block:: bash - - $ sudo cat somefile | docker run -i -a stdin mybuilder dobuild - -This is how piping a file into a container could be done for a build. -The container's ID will be printed after the build is done and the build logs -could be retrieved using ``docker logs``. This is useful if you need to pipe -a file or something else into a container and retrieve the container's ID once -the container has finished running. - - -A complete example -.................. - -.. code-block:: bash - - $ sudo docker run -d --name static static-web-files sh - $ sudo docker run -d --expose=8098 --name riak riakserver - $ sudo docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code -v $(pwd):/app/bin:ro --name app appserver - $ sudo docker run -d -p 1443:443 --dns=dns.dev.org --dns-search=dev.org -v /var/log/httpd --volumes-from static --link riak --link app -h www.sven.dev.org --name web webserver - $ sudo docker run -t -i --rm --volumes-from web -w /var/log/httpd busybox tail -f access.log - -This example shows 5 containers that might be set up to test a web application change: - -1. Start a pre-prepared volume image ``static-web-files`` (in the background) that has CSS, image and static HTML in it, (with a ``VOLUME`` instruction in the ``Dockerfile`` to allow the web server to use those files); -2. Start a pre-prepared ``riakserver`` image, give the container name ``riak`` and expose port ``8098`` to any containers that link to it; -3. Start the ``appserver`` image, restricting its memory usage to 100MB, setting two environment variables ``DEVELOPMENT`` and ``BRANCH`` and bind-mounting the current directory (``$(pwd)``) in the container in read-only mode as ``/app/bin``; -4. Start the ``webserver``, mapping port ``443`` in the container to port ``1443`` on the Docker server, setting the DNS server to ``dns.dev.org`` and DNS search domain to ``dev.org``, creating a volume to put the log files into (so we can access it from another container), then importing the files from the volume exposed by the ``static`` container, and linking to all exposed ports from ``riak`` and ``app``. Lastly, we set the hostname to ``web.sven.dev.org`` so its consistent with the pre-generated SSL certificate; -5. Finally, we create a container that runs ``tail -f access.log`` using the logs volume from the ``web`` container, setting the workdir to ``/var/log/httpd``. The ``--rm`` option means that when the container exits, the container's layer is removed. - - -.. _cli_save: - -``save`` ---------- - -:: - - Usage: docker save IMAGE - - Save an image to a tar archive (streamed to stdout by default) - - -o, --output="": Write to an file, instead of STDOUT - - -Produces a tarred repository to the standard output stream. -Contains all parent layers, and all tags + versions, or specified repo:tag. - -It is used to create a backup that can then be used with ``docker load`` - -.. code-block:: bash - - $ sudo docker save busybox > busybox.tar - $ ls -sh b.tar - 2.7M b.tar - $ sudo docker save --output busybox.tar busybox - $ ls -sh b.tar - 2.7M b.tar - $ sudo docker save -o fedora-all.tar fedora - $ sudo docker save -o fedora-latest.tar fedora:latest - - -.. 
_cli_search: - -``search`` ----------- - -:: - - Usage: docker search TERM - - Search the docker index for images - - --no-trunc=false: Don't truncate output - -s, --stars=0: Only displays with at least xxx stars - -t, --trusted=false: Only show trusted builds - -See :ref:`searching_central_index` for more details on finding shared images -from the commandline. - -.. _cli_start: - -``start`` ---------- - -:: - - Usage: docker start [OPTIONS] CONTAINER - - Start a stopped container - - -a, --attach=false: Attach container᾿s stdout/stderr and forward all signals to the process - -i, --interactive=false: Attach container᾿s stdin - -.. _cli_stop: - -``stop`` --------- - -:: - - Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] - - Stop a running container (Send SIGTERM, and then SIGKILL after grace period) - - -t, --time=10: Number of seconds to wait for the container to stop before killing it. - -The main process inside the container will receive SIGTERM, and after a grace period, SIGKILL - -.. _cli_tag: - -``tag`` -------- - -:: - - Usage: docker tag [OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG] - - Tag an image into a repository - - -f, --force=false: Force - -You can group your images together using names and -tags, and then upload them to :ref:`working_with_the_repository`. - -.. _cli_top: - -``top`` -------- - -:: - - Usage: docker top CONTAINER [ps OPTIONS] - - Lookup the running processes of a container - -.. _cli_version: - -``version`` ------------ - -Show the version of the Docker client, daemon, and latest released version. - - -.. _cli_wait: - -``wait`` --------- - -:: - - Usage: docker wait [OPTIONS] NAME - - Block until a container stops, then print its exit code. diff --git a/docs/sources/reference/commandline/index.rst b/docs/sources/reference/commandline/index.rst deleted file mode 100644 index 5536e1012e..0000000000 --- a/docs/sources/reference/commandline/index.rst +++ /dev/null @@ -1,14 +0,0 @@ -:title: Commands -:description: docker command line interface -:keywords: commands, command line, help, docker - - -Commands -======== - -Contents: - -.. toctree:: - :maxdepth: 1 - - cli diff --git a/docs/sources/reference/index.rst b/docs/sources/reference/index.rst deleted file mode 100644 index d35a19b93d..0000000000 --- a/docs/sources/reference/index.rst +++ /dev/null @@ -1,18 +0,0 @@ -:title: Docker Reference Manual -:description: References -:keywords: docker, references, api, command line, commands - -.. _references: - -Reference Manual -================ - -Contents: - -.. toctree:: - :maxdepth: 1 - - commandline/index - builder - run - api/index diff --git a/docs/sources/reference/run.rst b/docs/sources/reference/run.rst deleted file mode 100644 index 0e6247ea28..0000000000 --- a/docs/sources/reference/run.rst +++ /dev/null @@ -1,418 +0,0 @@ -:title: Docker Run Reference -:description: Configure containers at runtime -:keywords: docker, run, configure, runtime - -.. _run_docker: - -==================== -Docker Run Reference -==================== - -**Docker runs processes in isolated containers**. When an operator -executes ``docker run``, she starts a process with its own file -system, its own networking, and its own isolated process tree. The -:ref:`image_def` which starts the process may define defaults related -to the binary to run, the networking to expose, and more, but ``docker -run`` gives final control to the operator who starts the container -from the image. That's the main reason :ref:`cli_run` has more options -than any other ``docker`` command. 
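As a small illustration of that control, consider an image whose ``Dockerfile``
declares a default command; the operator can simply supply a different command
at run time (a sketch, the image name ``example/webapp`` is only illustrative):

.. code-block:: bash

    # The image's CMD might start a long-running service by default,
    # but the operator can override it and get an interactive shell instead
    $ sudo docker run -i -t example/webapp /bin/bash
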
- -Every one of the :ref:`example_list` shows running containers, and so -here we try to give more in-depth guidance. - -.. _run_running: - -General Form -============ - -As you've seen in the :ref:`example_list`, the basic `run` command -takes this form:: - - docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] - -To learn how to interpret the types of ``[OPTIONS]``, see -:ref:`cli_options`. - -The list of ``[OPTIONS]`` breaks down into two groups: - -1. Settings exclusive to operators, including: - - * Detached or Foreground running, - * Container Identification, - * Network settings, and - * Runtime Constraints on CPU and Memory - * Privileges and LXC Configuration - -2. Setting shared between operators and developers, where operators - can override defaults developers set in images at build time. - -Together, the ``docker run [OPTIONS]`` give complete control over -runtime behavior to the operator, allowing them to override all -defaults set by the developer during ``docker build`` and nearly all -the defaults set by the Docker runtime itself. - -Operator Exclusive Options -========================== - -Only the operator (the person executing ``docker run``) can set the -following options. - -.. contents:: - :local: - -Detached vs Foreground ----------------------- - -When starting a Docker container, you must first decide if you want to -run the container in the background in a "detached" mode or in the -default foreground mode:: - - -d=false: Detached mode: Run container in the background, print new container id - -Detached (-d) -............. - -In detached mode (``-d=true`` or just ``-d``), all I/O should be done -through network connections or shared volumes because the container is -no longer listening to the commandline where you executed ``docker -run``. You can reattach to a detached container with ``docker`` -:ref:`cli_attach`. If you choose to run a container in the detached -mode, then you cannot use the ``--rm`` option. - -Foreground -.......... - -In foreground mode (the default when ``-d`` is not specified), -``docker run`` can start the process in the container and attach the -console to the process's standard input, output, and standard -error. It can even pretend to be a TTY (this is what most commandline -executables expect) and pass along signals. All of that is -configurable:: - - -a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr`` - -t=false : Allocate a pseudo-tty - --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) - -i=false : Keep STDIN open even if not attached - -If you do not specify ``-a`` then Docker will `attach everything -(stdin,stdout,stderr) -`_. You -can specify to which of the three standard streams (``stdin``, ``stdout``, -``stderr``) you'd like to connect instead, as in:: - - docker run -a stdin -a stdout -i -t ubuntu /bin/bash - -For interactive processes (like a shell) you will typically want a tty -as well as persistent standard input (``stdin``), so you'll use ``-i --t`` together in most interactive cases. - -Container Identification ------------------------- - -Name (--name) -............. - -The operator can identify a container in three ways: - -* UUID long identifier ("f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778") -* UUID short identifier ("f78375b1c487") -* Name ("evil_ptolemy") - -The UUID identifiers come from the Docker daemon, and if you do not -assign a name to the container with ``--name`` then the daemon will -also generate a random string name too. 
The name can become a handy -way to add meaning to a container since you can use this name when -defining :ref:`links ` (or any other place -you need to identify a container). This works for both background and -foreground Docker containers. - -PID Equivalent -.............. - -And finally, to help with automation, you can have Docker write the -container ID out to a file of your choosing. This is similar to how -some programs might write out their process ID to a file (you've seen -them as PID files):: - - --cidfile="": Write the container ID to the file - -Network Settings ----------------- - -:: - - -n=true : Enable networking for this container - --dns=[] : Set custom dns servers for the container - -By default, all containers have networking enabled and they can make -any outgoing connections. The operator can completely disable -networking with ``docker run -n`` which disables all incoming and outgoing -networking. In cases like this, you would perform I/O through files or -STDIN/STDOUT only. - -Your container will use the same DNS servers as the host by default, -but you can override this with ``--dns``. - -Clean Up (--rm) ---------------- - -By default a container's file system persists even after the container -exits. This makes debugging a lot easier (since you can inspect the -final state) and you retain all your data by default. But if you are -running short-term **foreground** processes, these container file -systems can really pile up. If instead you'd like Docker to -**automatically clean up the container and remove the file system when -the container exits**, you can add the ``--rm`` flag:: - - --rm=false: Automatically remove the container when it exits (incompatible with -d) - - -Runtime Constraints on CPU and Memory -------------------------------------- - -The operator can also adjust the performance parameters of the container:: - - -m="": Memory limit (format: , where unit = b, k, m or g) - -c=0 : CPU shares (relative weight) - -The operator can constrain the memory available to a container easily -with ``docker run -m``. If the host supports swap memory, then the -``-m`` memory setting can be larger than physical RAM. - -Similarly the operator can increase the priority of this container -with the ``-c`` option. By default, all containers run at the same -priority and get the same proportion of CPU cycles, but you can tell -the kernel to give more shares of CPU time to one or more containers -when you start them via Docker. - -Runtime Privilege and LXC Configuration ---------------------------------------- - -:: - - --privileged=false: Give extended privileges to this container - --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" - -By default, Docker containers are "unprivileged" and cannot, for -example, run a Docker daemon inside a Docker container. This is -because by default a container is not allowed to access any devices, -but a "privileged" container is given access to all devices (see -lxc-template.go_ and documentation on `cgroups devices -`_). - -When the operator executes ``docker run --privileged``, Docker will -enable to access to all devices on the host as well as set some -configuration in AppArmor to allow the container nearly all the same -access to the host as processes running outside containers on the -host. Additional information about running with ``--privileged`` is -available on the `Docker Blog -`_. 
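As a rough illustration, an operation that needs extra kernel capabilities
(such as mounting a filesystem) fails in a default container but succeeds when
the container is started with ``--privileged`` (a minimal sketch; the exact
error message may vary):

.. code-block:: bash

    # Without --privileged the mount is refused
    $ sudo docker run ubuntu mount -t tmpfs none /mnt
    mount: permission denied

    # With --privileged the same mount succeeds
    $ sudo docker run --privileged ubuntu mount -t tmpfs none /mnt
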
- -If the Docker daemon was started using the ``lxc`` exec-driver -(``docker -d --exec-driver=lxc``) then the operator can also specify -LXC options using one or more ``--lxc-conf`` parameters. These can be -new parameters or override existing parameters from the lxc-template.go_. -Note that in the future, a given host's Docker daemon may not use LXC, -so this is an implementation-specific configuration meant for operators -already familiar with using LXC directly. - -.. _lxc-template.go: https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go - - -Overriding ``Dockerfile`` Image Defaults -======================================== - -When a developer builds an image from a :ref:`Dockerfile -` or when she commits it, the developer can set a -number of default parameters that take effect when the image starts up -as a container. - -Four of the ``Dockerfile`` commands cannot be overridden at runtime: -``FROM, MAINTAINER, RUN``, and ``ADD``. Everything else has a -corresponding override in ``docker run``. We'll go through what the -developer might have set in each ``Dockerfile`` instruction and how the -operator can override that setting. - -.. contents:: - :local: - -CMD (Default Command or Options) --------------------------------- - -Recall the optional ``COMMAND`` in the Docker commandline:: - - docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] - -This command is optional because the person who created the ``IMAGE`` -may have already provided a default ``COMMAND`` using the ``Dockerfile`` -``CMD``. As the operator (the person running a container from the -image), you can override that ``CMD`` just by specifying a new -``COMMAND``. - -If the image also specifies an ``ENTRYPOINT`` then the ``CMD`` or -``COMMAND`` get appended as arguments to the ``ENTRYPOINT``. - - -ENTRYPOINT (Default Command to Execute at Runtime -------------------------------------------------- - -:: - - --entrypoint="": Overwrite the default entrypoint set by the image - -The ENTRYPOINT of an image is similar to a ``COMMAND`` because it -specifies what executable to run when the container starts, but it is -(purposely) more difficult to override. The ``ENTRYPOINT`` gives a -container its default nature or behavior, so that when you set an -``ENTRYPOINT`` you can run the container *as if it were that binary*, -complete with default options, and you can pass in more options via -the ``COMMAND``. But, sometimes an operator may want to run something else -inside the container, so you can override the default ``ENTRYPOINT`` at -runtime by using a string to specify the new ``ENTRYPOINT``. Here is an -example of how to run a shell in a container that has been set up to -automatically run something else (like ``/usr/bin/redis-server``):: - - docker run -i -t --entrypoint /bin/bash example/redis - -or two examples of how to pass more parameters to that ENTRYPOINT:: - - docker run -i -t --entrypoint /bin/bash example/redis -c ls -l - docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help - - -EXPOSE (Incoming Ports) ------------------------ - -The ``Dockerfile`` doesn't give much control over networking, only -providing the ``EXPOSE`` instruction to give a hint to the operator -about what incoming ports might provide services. 
The following -options work with or override the ``Dockerfile``'s exposed defaults:: - - --expose=[]: Expose a port from the container - without publishing it to your host - -P=false : Publish all exposed ports to the host interfaces - -p=[] : Publish a container's port to the host (format: - ip:hostPort:containerPort | ip::containerPort | - hostPort:containerPort) - (use 'docker port' to see the actual mapping) - --link="" : Add link to another container (name:alias) - -As mentioned previously, ``EXPOSE`` (and ``--expose``) make a port -available **in** a container for incoming connections. The port number -on the inside of the container (where the service listens) does not -need to be the same number as the port exposed on the outside of the -container (where clients connect), so inside the container you might -have an HTTP service listening on port 80 (and so you ``EXPOSE 80`` in -the ``Dockerfile``), but outside the container the port might be 42800. - -To help a new client container reach the server container's internal -port operator ``--expose``'d by the operator or ``EXPOSE``'d by the -developer, the operator has three choices: start the server container -with ``-P`` or ``-p,`` or start the client container with ``--link``. - -If the operator uses ``-P`` or ``-p`` then Docker will make the -exposed port accessible on the host and the ports will be available to -any client that can reach the host. To find the map between the host -ports and the exposed ports, use ``docker port``) - -If the operator uses ``--link`` when starting the new client container, -then the client container can access the exposed port via a private -networking interface. Docker will set some environment variables in -the client container to help indicate which interface and port to use. - -ENV (Environment Variables) ---------------------------- - -The operator can **set any environment variable** in the container by -using one or more ``-e`` flags, even overriding those already defined by the -developer with a Dockefile ``ENV``:: - - $ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export - declare -x HOME="/" - declare -x HOSTNAME="85bc26a0e200" - declare -x OLDPWD - declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - declare -x PWD="/" - declare -x SHLVL="1" - declare -x container="lxc" - declare -x deep="purple" - -Similarly the operator can set the **hostname** with ``-h``. - -``--link name:alias`` also sets environment variables, using the -*alias* string to define environment variables within the container -that give the IP and PORT information for connecting to the service -container. Let's imagine we have a container running Redis:: - - # Start the service container, named redis-name - $ docker run -d --name redis-name dockerfiles/redis - 4241164edf6f5aca5b0e9e4c9eccd899b0b8080c64c0cd26efe02166c73208f3 - - # The redis-name container exposed port 6379 - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 4241164edf6f dockerfiles/redis:latest /redis-stable/src/re 5 seconds ago Up 4 seconds 6379/tcp redis-name - - # Note that there are no public ports exposed since we didn't use -p or -P - $ docker port 4241164edf6f 6379 - 2014/01/25 00:55:38 Error: No public port '6379' published for 4241164edf6f - - -Yet we can get information about the Redis container's exposed ports -with ``--link``. Choose an alias that will form a valid environment -variable! 
- -:: - - $ docker run --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c export - declare -x HOME="/" - declare -x HOSTNAME="acda7f7b1cdc" - declare -x OLDPWD - declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - declare -x PWD="/" - declare -x REDIS_ALIAS_NAME="/distracted_wright/redis" - declare -x REDIS_ALIAS_PORT="tcp://172.17.0.32:6379" - declare -x REDIS_ALIAS_PORT_6379_TCP="tcp://172.17.0.32:6379" - declare -x REDIS_ALIAS_PORT_6379_TCP_ADDR="172.17.0.32" - declare -x REDIS_ALIAS_PORT_6379_TCP_PORT="6379" - declare -x REDIS_ALIAS_PORT_6379_TCP_PROTO="tcp" - declare -x SHLVL="1" - declare -x container="lxc" - -And we can use that information to connect from another container as a client:: - - $ docker run -i -t --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT' - 172.17.0.32:6379> - -VOLUME (Shared Filesystems) ---------------------------- - -:: - - -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. - If "container-dir" is missing, then docker creates a new volume. - --volumes-from="": Mount all volumes from the given container(s) - -The volumes commands are complex enough to have their own -documentation in section :ref:`volume_def`. A developer can define one -or more ``VOLUME``\s associated with an image, but only the operator can -give access from one container to another (or from a container to a -volume mounted on the host). - -USER ----- - -The default user within a container is ``root`` (id = 0), but if the -developer created additional users, those are accessible too. The -developer can set a default user to run the first process with the -``Dockerfile USER`` command, but the operator can override it :: - - -u="": Username or UID - -WORKDIR -------- - -The default working directory for running binaries within a container is the root directory (``/``), but the developer can set a different default with the ``Dockerfile WORKDIR`` command. The operator can override this with:: - - -w="": Working directory inside the container - diff --git a/docs/sources/terms/container.rst b/docs/sources/terms/container.rst deleted file mode 100644 index 206664bd82..0000000000 --- a/docs/sources/terms/container.rst +++ /dev/null @@ -1,47 +0,0 @@ -:title: Container -:description: Definitions of a container -:keywords: containers, lxc, concepts, explanation, image, container - -.. _container_def: - -Container -========= - -.. image:: images/docker-filesystems-busyboxrw.png - -Once you start a process in Docker from an :ref:`image_def`, Docker -fetches the image and its :ref:`parent_image_def`, and repeats the -process until it reaches the :ref:`base_image_def`. Then the -:ref:`ufs_def` adds a read-write layer on top. That read-write layer, -plus the information about its :ref:`parent_image_def` and some -additional information like its unique id, networking configuration, -and resource limits is called a **container**. - -.. _container_state_def: - -Container State -............... - -Containers can change, and so they have state. A container may be -**running** or **exited**. - -When a container is running, the idea of a "container" also includes a -tree of processes running on the CPU, isolated from the other -processes running on the host. - -When the container is exited, the state of the file system and -its exit value is preserved. You can start, stop, and restart a -container. 
The processes restart from scratch (their memory state is -**not** preserved in a container), but the file system is just as it -was when the container was stopped. - -You can promote a container to an :ref:`image_def` with ``docker -commit``. Once a container is an image, you can use it as a parent for -new containers. - -Container IDs -............. -All containers are identified by a 64 hexadecimal digit string (internally a 256bit -value). To simplify their use, a short ID of the first 12 characters can be used -on the commandline. There is a small possibility of short id collisions, so the -docker server will always return the long ID. diff --git a/docs/sources/terms/filesystem.rst b/docs/sources/terms/filesystem.rst deleted file mode 100644 index 0af893f198..0000000000 --- a/docs/sources/terms/filesystem.rst +++ /dev/null @@ -1,38 +0,0 @@ -:title: File Systems -:description: How Linux organizes its persistent storage -:keywords: containers, files, linux - -.. _filesystem_def: - -File System -=========== - -.. image:: images/docker-filesystems-generic.png - -In order for a Linux system to run, it typically needs two `file -systems `_: - -1. boot file system (bootfs) -2. root file system (rootfs) - -The **boot file system** contains the bootloader and the kernel. The -user never makes any changes to the boot file system. In fact, soon -after the boot process is complete, the entire kernel is in memory, -and the boot file system is unmounted to free up the RAM associated -with the initrd disk image. - - -The **root file system** includes the typical directory structure we -associate with Unix-like operating systems: ``/dev, /proc, /bin, /etc, -/lib, /usr,`` and ``/tmp`` plus all the configuration files, binaries -and libraries required to run user applications (like bash, ls, and so -forth). - -While there can be important kernel differences between different -Linux distributions, the contents and organization of the root file -system are usually what make your software packages dependent on one -distribution versus another. Docker can help solve this problem by -running multiple distributions at the same time. - -.. image:: images/docker-filesystems-multiroot.png - diff --git a/docs/sources/terms/image.rst b/docs/sources/terms/image.rst deleted file mode 100644 index 6d5c8b2e7c..0000000000 --- a/docs/sources/terms/image.rst +++ /dev/null @@ -1,46 +0,0 @@ -:title: Images -:description: Definition of an image -:keywords: containers, lxc, concepts, explanation, image, container - -.. _image_def: - -Image -===== - -.. image:: images/docker-filesystems-debian.png - -In Docker terminology, a read-only :ref:`layer_def` is called an -**image**. An image never changes. - -Since Docker uses a :ref:`ufs_def`, the processes think the whole file -system is mounted read-write. But all the changes go to the top-most -writeable layer, and underneath, the original file in the read-only -image is unchanged. Since images don't change, images do not have state. - -.. image:: images/docker-filesystems-debianrw.png - -.. _parent_image_def: - -Parent Image -............ - -.. image:: images/docker-filesystems-multilayer.png - -Each image may depend on one more image which forms the layer beneath -it. We sometimes say that the lower image is the **parent** of the -upper image. - -.. _base_image_def: - -Base Image -.......... - -An image that has no parent is a **base image**. - -Image IDs -......... -All images are identified by a 64 hexadecimal digit string (internally a 256bit -value). 
To simplify their use, a short ID of the first 12 characters can be used -on the command line. There is a small possibility of short id collisions, so the -docker server will always return the long ID. - diff --git a/docs/sources/terms/index.rst b/docs/sources/terms/index.rst deleted file mode 100644 index 40851082b5..0000000000 --- a/docs/sources/terms/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -:title: Glossary -:description: Definitions of terms used in Docker documentation -:keywords: concepts, documentation, docker, containers - - - -Glossary -======== - -Definitions of terms used in Docker documentation. - -Contents: - -.. toctree:: - :maxdepth: 1 - - filesystem - layer - image - container - registry - repository - - diff --git a/docs/sources/terms/layer.rst b/docs/sources/terms/layer.rst deleted file mode 100644 index 509dbe5cba..0000000000 --- a/docs/sources/terms/layer.rst +++ /dev/null @@ -1,40 +0,0 @@ -:title: Layers -:description: Organizing the Docker Root File System -:keywords: containers, lxc, concepts, explanation, image, container - -Layers -====== - -In a traditional Linux boot, the kernel first mounts the root -:ref:`filesystem_def` as read-only, checks its integrity, and then -switches the whole rootfs volume to read-write mode. - -.. _layer_def: - -Layer -..... - -When Docker mounts the rootfs, it starts read-only, as in a traditional -Linux boot, but then, instead of changing the file system to -read-write mode, it takes advantage of a `union mount -`_ to add a read-write file -system *over* the read-only file system. In fact there may be multiple -read-only file systems stacked on top of each other. We think of each -one of these file systems as a **layer**. - -.. image:: images/docker-filesystems-multilayer.png - -At first, the top read-write layer has nothing in it, but any time a -process creates a file, this happens in the top layer. And if -something needs to update an existing file in a lower layer, then the -file gets copied to the upper layer and changes go into the copy. The -version of the file on the lower layer cannot be seen by the -applications anymore, but it is there, unchanged. - -.. _ufs_def: - -Union File System -................. - -We call the union of the read-write layer and all the read-only layers -a **union file system**. diff --git a/docs/sources/terms/registry.rst b/docs/sources/terms/registry.rst deleted file mode 100644 index 90c3ee721c..0000000000 --- a/docs/sources/terms/registry.rst +++ /dev/null @@ -1,16 +0,0 @@ -:title: Registry -:description: Definition of an Registry -:keywords: containers, lxc, concepts, explanation, image, repository, container - -.. _registry_def: - -Registry -========== - -A Registry is a hosted service containing :ref:`repositories` -of :ref:`images` which responds to the Registry API. - -The default registry can be accessed using a browser at http://images.docker.io -or using the ``sudo docker search`` command. - -For more information see :ref:`Working with Repositories` diff --git a/docs/sources/terms/repository.rst b/docs/sources/terms/repository.rst deleted file mode 100644 index 04f3950f36..0000000000 --- a/docs/sources/terms/repository.rst +++ /dev/null @@ -1,30 +0,0 @@ -:title: Repository -:description: Definition of an Repository -:keywords: containers, lxc, concepts, explanation, image, repository, container - -.. _repository_def: - -Repository -========== - -A repository is a set of images either on your local Docker server, or -shared, by pushing it to a :ref:`Registry` server. 
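As a sketch of that sharing step (the image ID, user name and repository name
below are purely illustrative):

.. code-block:: bash

    # Name a local image under your user namespace, then push it to the registry
    $ sudo docker tag 7d9495d03763 myuser/myapp
    $ sudo docker push myuser/myapp
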
- -Images can be associated with a repository (or multiple) by giving them an image name -using one of three different commands: - -1. At build time (e.g. ``sudo docker build -t IMAGENAME``), -2. When committing a container (e.g. ``sudo docker commit CONTAINERID IMAGENAME``) or -3. When tagging an image id with an image name (e.g. ``sudo docker tag IMAGEID IMAGENAME``). - -A `Fully Qualified Image Name` (FQIN) can be made up of 3 parts: - -``[registry_hostname[:port]/][user_name/](repository_name:version_tag)`` - -``username`` and ``registry_hostname`` default to an empty string. -When ``registry_hostname`` is an empty string, then ``docker push`` will push to ``index.docker.io:80``. - -If you create a new repository which you want to share, you will need to set at least the -``user_name``, as the 'default' blank ``user_name`` prefix is reserved for official Docker images. - -For more information see :ref:`Working with Repositories` diff --git a/docs/sources/toctree.rst b/docs/sources/toctree.rst deleted file mode 100644 index d09bcc313c..0000000000 --- a/docs/sources/toctree.rst +++ /dev/null @@ -1,21 +0,0 @@ -:title: Documentation -:description: -- todo: change me -:keywords: todo, docker, documentation, installation, usage, examples, contributing, faq, command line, concepts - -Documentation -============= - -This documentation has the following resources: - -.. toctree:: - :maxdepth: 1 - - installation/index - use/index - examples/index - reference/index - contributing/index - terms/index - articles/index - faq - diff --git a/docs/sources/use/ambassador_pattern_linking.rst b/docs/sources/use/ambassador_pattern_linking.rst deleted file mode 100644 index bbd5816768..0000000000 --- a/docs/sources/use/ambassador_pattern_linking.rst +++ /dev/null @@ -1,183 +0,0 @@ -:title: Link via an Ambassador Container -:description: Using the Ambassador pattern to abstract (network) services -:keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming - -.. _ambassador_pattern_linking: - -Link via an Ambassador Container -================================ - -Rather than hardcoding network links between a service consumer and provider, Docker -encourages service portability. - -eg, instead of - -.. code-block:: bash - - (consumer) --> (redis) - -requiring you to restart the ``consumer`` to attach it to a different ``redis`` service, -you can add ambassadors - -.. code-block:: bash - - (consumer) --> (redis-ambassador) --> (redis) - - or - - (consumer) --> (redis-ambassador) ---network---> (redis-ambassador) --> (redis) - -When you need to rewire your consumer to talk to a different redis server, you -can just restart the ``redis-ambassador`` container that the consumer is connected to. - -This pattern also allows you to transparently move the redis server to a different -docker host from the consumer. - -Using the ``svendowideit/ambassador`` container, the link wiring is controlled entirely -from the ``docker run`` parameters. - -Two host Example ----------------- - -Start actual redis server on one Docker host - -.. code-block:: bash - - big-server $ docker run -d --name redis crosbymichael/redis - -Then add an ambassador linked to the redis server, mapping a port to the outside world - -.. code-block:: bash - - big-server $ docker run -d --link redis:redis --name redis_ambassador -p 6379:6379 svendowideit/ambassador - -On the other host, you can set up another ambassador setting environment variables for each remote port we want to proxy to the ``big-server`` - -.. 
code-block:: bash - - client-server $ docker run -d --name redis_ambassador --expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador - -Then on the ``client-server`` host, you can use a redis client container to talk -to the remote redis server, just by linking to the local redis ambassador. - -.. code-block:: bash - - client-server $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli - redis 172.17.0.160:6379> ping - PONG - - - -How it works ------------- - -The following example shows what the ``svendowideit/ambassador`` container does -automatically (with a tiny amount of ``sed``) - -On the docker host (192.168.1.52) that redis will run on: - -.. code-block:: bash - - # start actual redis server - $ docker run -d --name redis crosbymichael/redis - - # get a redis-cli container for connection testing - $ docker pull relateiq/redis-cli - - # test the redis server by talking to it directly - $ docker run -t -i --rm --link redis:redis relateiq/redis-cli - redis 172.17.0.136:6379> ping - PONG - ^D - - # add redis ambassador - $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 busybox sh - -in the redis_ambassador container, you can see the linked redis containers's env - -.. code-block:: bash - - $ env - REDIS_PORT=tcp://172.17.0.136:6379 - REDIS_PORT_6379_TCP_ADDR=172.17.0.136 - REDIS_NAME=/redis_ambassador/redis - HOSTNAME=19d7adf4705e - REDIS_PORT_6379_TCP_PORT=6379 - HOME=/ - REDIS_PORT_6379_TCP_PROTO=tcp - container=lxc - REDIS_PORT_6379_TCP=tcp://172.17.0.136:6379 - TERM=xterm - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PWD=/ - - -This environment is used by the ambassador socat script to expose redis to the world -(via the -p 6379:6379 port mapping) - -.. code-block:: bash - - $ docker rm redis_ambassador - $ sudo ./contrib/mkimage-unittest.sh - $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 docker-ut sh - - $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379 - -then ping the redis server via the ambassador - -.. code-block::bash - - $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli - redis 172.17.0.160:6379> ping - PONG - -Now goto a different server - -.. code-block:: bash - - $ sudo ./contrib/mkimage-unittest.sh - $ docker run -t -i --expose 6379 --name redis_ambassador docker-ut sh - - $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379 - -and get the redis-cli image so we can talk over the ambassador bridge - -.. code-block:: bash - - $ docker pull relateiq/redis-cli - $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli - redis 172.17.0.160:6379> ping - PONG - -The svendowideit/ambassador Dockerfile --------------------------------------- - -The ``svendowideit/ambassador`` image is a small busybox image with ``socat`` built in. -When you start the container, it uses a small ``sed`` script to parse out the (possibly multiple) -link environment variables to set up the port forwarding. On the remote host, you need to set the -variable using the ``-e`` command line option. - -``--expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`` will forward the -local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379``. - - -:: - - # - # - # first you need to build the docker-ut image - # using ./contrib/mkimage-unittest.sh - # then - # docker build -t SvenDowideit/ambassador . 
- # docker tag SvenDowideit/ambassador ambassador - # then to run it (on the host that has the real backend on it) - # docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 ambassador - # on the remote host, you can set up another ambassador - # docker run -t -i --name redis_ambassador --expose 6379 sh - - FROM docker-ut - MAINTAINER SvenDowideit@home.org.au - - - CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top - diff --git a/docs/sources/use/basics.rst b/docs/sources/use/basics.rst deleted file mode 100644 index 4164e706f7..0000000000 --- a/docs/sources/use/basics.rst +++ /dev/null @@ -1,199 +0,0 @@ -:title: First steps with Docker -:description: Common usage and commands -:keywords: Examples, Usage, basic commands, docker, documentation, examples - - -First steps with Docker -======================= - -Check your Docker install -------------------------- - -This guide assumes you have a working installation of Docker. To check -your Docker install, run the following command: - -.. code-block:: bash - - # Check that you have a working install - docker info - -If you get ``docker: command not found`` or something like -``/var/lib/docker/repositories: permission denied`` you may have an incomplete -docker installation or insufficient privileges to access Docker on your machine. - -Please refer to :ref:`installation_list` for installation instructions. - -Download a pre-built image --------------------------- - -.. code-block:: bash - - # Download an ubuntu image - sudo docker pull ubuntu - -This will find the ``ubuntu`` image by name in the :ref:`Central Index -` and download it from the top-level Central -Repository to a local image cache. - -.. NOTE:: When the image has successfully downloaded, you will see a - 12 character hash ``539c0211cd76: Download complete`` which is the - short form of the image ID. These short image IDs are the first 12 - characters of the full image ID - which can be found using ``docker - inspect`` or ``docker images --no-trunc=true`` - - **If you're using OS X** then you shouldn't use ``sudo`` - -Running an interactive shell ----------------------------- - -.. code-block:: bash - - # Run an interactive shell in the ubuntu image, - # allocate a tty, attach stdin and stdout - # To detach the tty without exiting the shell, - # use the escape sequence Ctrl-p + Ctrl-q - # note: This will continue to exist in a stopped state once exited (see "docker ps -a") - sudo docker run -i -t ubuntu /bin/bash - -.. _bind_docker: - -Bind Docker to another host/port or a Unix socket -------------------------------------------------- - -.. warning:: Changing the default ``docker`` daemon binding to a TCP - port or Unix *docker* user group will increase your security risks - by allowing non-root users to gain *root* access on the - host. Make sure you control access to ``docker``. If you are binding - to a TCP port, anyone with access to that port has full Docker access; - so it is not advisable on an open network. - -With ``-H`` it is possible to make the Docker daemon to listen on a -specific IP and port. By default, it will listen on -``unix:///var/run/docker.sock`` to allow only local connections by the -*root* user. You *could* set it to ``0.0.0.0:4243`` or a specific host IP to -give access to everybody, but that is **not recommended** because then -it is trivial for someone to gain root access to the host where the -daemon is running. 
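If you do need non-root users to reach the daemon, keeping the default Unix
socket and restricting it to a dedicated group is generally less risky than
exposing a TCP port. A sketch using common defaults (group name and socket
path); note that members of such a group effectively gain root-equivalent
access, and depending on your Docker version the socket ownership may need to
be re-applied after a daemon restart:

.. code-block:: bash

    # Create a "docker" group, give it the daemon socket, and add a user to it
    $ sudo groupadd docker
    $ sudo chown root:docker /var/run/docker.sock
    $ sudo usermod -aG docker exampleuser
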
- -Similarly, the Docker client can use ``-H`` to connect to a custom port. - -``-H`` accepts host and port assignment in the following format: -``tcp://[host][:port]`` or ``unix://path`` - -For example: - -* ``tcp://host:4243`` -> tcp connection on host:4243 -* ``unix://path/to/socket`` -> unix socket located at ``path/to/socket`` - -``-H``, when empty, will default to the same value as when no ``-H`` was passed in. - -``-H`` also accepts short form for TCP bindings: -``host[:port]`` or ``:port`` - -.. code-block:: bash - - # Run docker in daemon mode - sudo /docker -H 0.0.0.0:5555 -d & - # Download an ubuntu image - sudo docker -H :5555 pull ubuntu - -You can use multiple ``-H``, for example, if you want to listen on -both TCP and a Unix socket - -.. code-block:: bash - - # Run docker in daemon mode - sudo /docker -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -d & - # Download an ubuntu image, use default Unix socket - sudo docker pull ubuntu - # OR use the TCP port - sudo docker -H tcp://127.0.0.1:4243 pull ubuntu - -Starting a long-running worker process --------------------------------------- - -.. code-block:: bash - - # Start a very useful long-running process - JOB=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done") - - # Collect the output of the job so far - sudo docker logs $JOB - - # Kill the job - sudo docker kill $JOB - - -Listing containers ------------------- - -.. code-block:: bash - - sudo docker ps # Lists only running containers - sudo docker ps -a # Lists all containers - - -Controlling containers ----------------------- -.. code-block:: bash - - # Start a new container - JOB=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done") - - # Stop the container - docker stop $JOB - - # Start the container - docker start $JOB - - # Restart the container - docker restart $JOB - - # SIGKILL a container - docker kill $JOB - - # Remove a container - docker stop $JOB # Container must be stopped to remove it - docker rm $JOB - - -Bind a service on a TCP port ------------------------------- - -.. code-block:: bash - - # Bind port 4444 of this container, and tell netcat to listen on it - JOB=$(sudo docker run -d -p 4444 ubuntu:12.10 /bin/nc -l 4444) - - # Which public port is NATed to my container? - PORT=$(sudo docker port $JOB 4444 | awk -F: '{ print $2 }') - - # Connect to the public port - echo hello world | nc 127.0.0.1 $PORT - - # Verify that the network connection worked - echo "Daemon received: $(sudo docker logs $JOB)" - - -Committing (saving) a container state -------------------------------------- - -Save your containers state to a container image, so the state can be re-used. - -When you commit your container only the differences between the image the -container was created from and the current state of the container will be -stored (as a diff). See which images you already have using the ``docker -images`` command. - -.. code-block:: bash - - # Commit your container to a new named image - sudo docker commit - - # List your containers - sudo docker images - -You now have a image state from which you can create new instances. 
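As a concrete sketch (the container ID and image name below are hypothetical):

.. code-block:: bash

    # Commit container 0b2616b0e5a8 as a new image named exampleuser/myapp
    $ sudo docker commit 0b2616b0e5a8 exampleuser/myapp

    # The new image now shows up alongside the others
    $ sudo docker images
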
- -Read more about :ref:`working_with_the_repository` or continue to the -complete :ref:`cli` diff --git a/docs/sources/use/chef.rst b/docs/sources/use/chef.rst deleted file mode 100644 index 919eba7a8f..0000000000 --- a/docs/sources/use/chef.rst +++ /dev/null @@ -1,95 +0,0 @@ -:title: Chef Usage -:description: Installation and using Docker via Chef -:keywords: chef, installation, usage, docker, documentation - -.. _install_using_chef: - -Using Chef -============= - -.. note:: - - Please note this is a community contributed installation path. The - only 'official' installation is using the :ref:`ubuntu_linux` - installation path. This version may sometimes be out of date. - -Requirements ------------- - -To use this guide you'll need a working installation of -`Chef `_. This cookbook supports a variety of -operating systems. - -Installation ------------- - -The cookbook is available on the `Chef Community Site -`_ and can be installed -using your favorite cookbook dependency manager. - -The source can be found on `GitHub -`_. - -Usage ------ - -The cookbook provides recipes for installing Docker, configuring init -for Docker, and resources for managing images and containers. -It supports almost all Docker functionality. - -Installation -~~~~~~~~~~~~ - -.. code-block:: ruby - - include_recipe 'docker' - -Images -~~~~~~ - -The next step is to pull a Docker image. For this, we have a resource: - -.. code-block:: ruby - - docker_image 'samalba/docker-registry' - -This is equivalent to running: - -.. code-block:: bash - - docker pull samalba/docker-registry - -There are attributes available to control how long the cookbook -will allow for downloading (5 minute default). - -To remove images you no longer need: - -.. code-block:: ruby - - docker_image 'samalba/docker-registry' do - action :remove - end - -Containers -~~~~~~~~~~ - -Now you have an image where you can run commands within a container -managed by Docker. - -.. code-block:: ruby - - docker_container 'samalba/docker-registry' do - detach true - port '5000:5000' - env 'SETTINGS_FLAVOR=local' - volume '/mnt/docker:/docker-storage' - end - -This is equivalent to running the following command, but under upstart: - -.. code-block:: bash - - docker run --detach=true --publish='5000:5000' --env='SETTINGS_FLAVOR=local' --volume='/mnt/docker:/docker-storage' samalba/docker-registry - -The resources will accept a single string or an array of values -for any docker flags that allow multiple values. diff --git a/docs/sources/use/host_integration.rst b/docs/sources/use/host_integration.rst deleted file mode 100644 index cb920a5908..0000000000 --- a/docs/sources/use/host_integration.rst +++ /dev/null @@ -1,74 +0,0 @@ -:title: Automatically Start Containers -:description: How to generate scripts for upstart, systemd, etc. -:keywords: systemd, upstart, supervisor, docker, documentation, host integration - - - -Automatically Start Containers -============================== - -You can use your Docker containers with process managers like ``upstart``, -``systemd`` and ``supervisor``. - -Introduction ------------- - -If you want a process manager to manage your containers you will need to run -the docker daemon with the ``-r=false`` so that docker will not automatically -restart your containers when the host is restarted. - -When you have finished setting up your image and are happy with your -running container, you can then attach a process manager to manage -it. 
When your run ``docker start -a`` docker will automatically attach -to the running container, or start it if needed and forward all signals -so that the process manager can detect when a container stops and correctly -restart it. - -Here are a few sample scripts for systemd and upstart to integrate with docker. - - -Sample Upstart Script ---------------------- - -In this example we've already created a container to run Redis with -``--name redis_server``. To create an upstart script for our container, -we create a file named ``/etc/init/redis.conf`` and place the following -into it: - -.. code-block:: bash - - description "Redis container" - author "Me" - start on filesystem and started docker - stop on runlevel [!2345] - respawn - script - /usr/bin/docker start -a redis_server - end script - -Next, we have to configure docker so that it's run with the option ``-r=false``. -Run the following command: - -.. code-block:: bash - - $ sudo sh -c "echo 'DOCKER_OPTS=\"-r=false\"' > /etc/default/docker" - - -Sample systemd Script ---------------------- - -.. code-block:: bash - - [Unit] - Description=Redis container - Author=Me - After=docker.service - - [Service] - Restart=always - ExecStart=/usr/bin/docker start -a redis_server - ExecStop=/usr/bin/docker stop -t 2 redis_server - - [Install] - WantedBy=local.target - diff --git a/docs/sources/use/index.rst b/docs/sources/use/index.rst deleted file mode 100644 index dcf6289b41..0000000000 --- a/docs/sources/use/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -:title: Documentation -:description: -- todo: change me -:keywords: todo, docker, documentation, basic, builder - - - -Use -======== - -Contents: - -.. toctree:: - :maxdepth: 1 - - basics - workingwithrepository - port_redirection - networking - host_integration - working_with_volumes - working_with_links_names - ambassador_pattern_linking - chef - puppet diff --git a/docs/sources/use/networking.rst b/docs/sources/use/networking.rst deleted file mode 100644 index 59c63ed674..0000000000 --- a/docs/sources/use/networking.rst +++ /dev/null @@ -1,153 +0,0 @@ -:title: Configure Networking -:description: Docker networking -:keywords: network, networking, bridge, docker, documentation - - -Configure Networking -==================== - -Docker uses Linux bridge capabilities to provide network connectivity -to containers. The ``docker0`` bridge interface is managed by Docker -for this purpose. When the Docker daemon starts it : - -- creates the ``docker0`` bridge if not present -- searches for an IP address range which doesn't overlap with an existing route -- picks an IP in the selected range -- assigns this IP to the ``docker0`` bridge - - -.. code-block:: bash - - # List host bridges - $ sudo brctl show - bridge name bridge id STP enabled interfaces - docker0 8000.000000000000 no - - # Show docker0 IP address - $ sudo ifconfig docker0 - docker0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx - inet addr:172.17.42.1 Bcast:0.0.0.0 Mask:255.255.0.0 - - - -At runtime, a :ref:`specific kind of virtual -interface` is given to each container which is then -bonded to the ``docker0`` bridge. Each container also receives a -dedicated IP address from the same range as ``docker0``. The -``docker0`` IP address is used as the default gateway for the -container. - -.. 
code-block:: bash - - # Run a container - $ sudo docker run -t -i -d base /bin/bash - 52f811c5d3d69edddefc75aff5a4525fc8ba8bcfa1818132f9dc7d4f7c7e78b4 - - $ sudo brctl show - bridge name bridge id STP enabled interfaces - docker0 8000.fef213db5a66 no vethQCDY1N - - -Above, ``docker0`` acts as a bridge for the ``vethQCDY1N`` interface -which is dedicated to the 52f811c5d3d6 container. - - -How to use a specific IP address range ---------------------------------------- - -Docker will try hard to find an IP range that is not used by the -host. Even though it works for most cases, it's not bullet-proof and -sometimes you need to have more control over the IP addressing scheme. - -For this purpose, Docker allows you to manage the ``docker0`` bridge -or your own one using the ``-b=`` parameter. - -In this scenario: - -- ensure Docker is stopped -- create your own bridge (``bridge0`` for example) -- assign a specific IP to this bridge -- start Docker with the ``-b=bridge0`` parameter - - -.. code-block:: bash - - # Stop Docker - $ sudo service docker stop - - # Clean docker0 bridge and - # add your very own bridge0 - $ sudo ifconfig docker0 down - $ sudo brctl addbr bridge0 - $ sudo ifconfig bridge0 192.168.227.1 netmask 255.255.255.0 - - # Edit your Docker startup file - $ echo "DOCKER_OPTS=\"-b=bridge0\"" >> /etc/default/docker - - # Start Docker - $ sudo service docker start - - # Ensure bridge0 IP is not changed by Docker - $ sudo ifconfig bridge0 - bridge0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx - inet addr:192.168.227.1 Bcast:192.168.227.255 Mask:255.255.255.0 - - # Run a container - $ docker run -i -t base /bin/bash - - # Container IP in the 192.168.227/24 range - root@261c272cd7d5:/# ifconfig eth0 - eth0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx - inet addr:192.168.227.5 Bcast:192.168.227.255 Mask:255.255.255.0 - - # bridge0 IP as the default gateway - root@261c272cd7d5:/# route -n - Kernel IP routing table - Destination Gateway Genmask Flags Metric Ref Use Iface - 0.0.0.0 192.168.227.1 0.0.0.0 UG 0 0 0 eth0 - 192.168.227.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0 - - # hits CTRL+P then CTRL+Q to detach - - # Display bridge info - $ sudo brctl show - bridge name bridge id STP enabled interfaces - bridge0 8000.fe7c2e0faebd no vethAQI2QT - - -Container intercommunication -------------------------------- - -The value of the Docker daemon's ``icc`` parameter determines whether -containers can communicate with each other over the bridge network. - -- The default, ``--icc=true`` allows containers to communicate with each other. -- ``--icc=false`` means containers are isolated from each other. - -Docker uses ``iptables`` under the hood to either accept or -drop communication between containers. - - -.. _vethxxxx-device: - -What is the vethXXXX device? ------------------------------------ -Well. Things get complicated here. - -The ``vethXXXX`` interface is the host side of a point-to-point link -between the host and the corresponding container; the other side of -the link is the container's ``eth0`` -interface. This pair (host ``vethXXX`` and container ``eth0``) are -connected like a tube. Everything that comes in one side will come out -the other side. - -All the plumbing is delegated to Linux network capabilities (check the -ip link command) and the namespaces infrastructure. 
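A quick way to see both ends of such a pair (a sketch; interface names and the
image used are illustrative):

.. code-block:: bash

    # On the host, each running container has a matching vethXXXX device
    $ ip link | grep veth

    # Inside a container, only the eth0 end of the pair is visible
    $ sudo docker run --rm base ifconfig eth0
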
- - -I want more ------------- - -Jérôme Petazzoni has created ``pipework`` to connect together -containers in arbitrarily complex scenarios : -https://github.com/jpetazzo/pipework diff --git a/docs/sources/use/port_redirection.rst b/docs/sources/use/port_redirection.rst deleted file mode 100644 index cf5c2100a9..0000000000 --- a/docs/sources/use/port_redirection.rst +++ /dev/null @@ -1,152 +0,0 @@ -:title: Redirect Ports -:description: usage about port redirection -:keywords: Usage, basic port, docker, documentation, examples - - -.. _port_redirection: - -Redirect Ports -============== - -Interacting with a service is commonly done through a connection to a -port. When this service runs inside a container, one can connect to -the port after finding the IP address of the container as follows: - -.. code-block:: bash - - # Find IP address of container with ID - docker inspect | grep IPAddress | cut -d '"' -f 4 - -However, this IP address is local to the host system and the container -port is not reachable by the outside world. Furthermore, even if the -port is used locally, e.g. by another container, this method is -tedious as the IP address of the container changes every time it -starts. - -Docker addresses these two problems and give a simple and robust way -to access services running inside containers. - -To allow non-local clients to reach the service running inside the -container, Docker provide ways to bind the container port to an -interface of the host system. To simplify communication between -containers, Docker provides the linking mechanism. - -Auto map all exposed ports on the host --------------------------------------- - -To bind all the exposed container ports to the host automatically, use -``docker run -P ``. The mapped host ports will be auto-selected -from a pool of unused ports (49000..49900), and you will need to use -``docker ps``, ``docker inspect `` or -``docker port `` to determine what they are. - -Binding a port to a host interface ------------------------------------ - -To bind a port of the container to a specific interface of the host -system, use the ``-p`` parameter of the ``docker run`` command: - -.. code-block:: bash - - # General syntax - docker run -p [([:[host_port]])|():][/udp] - -When no host interface is provided, the port is bound to all available -interfaces of the host machine (aka INADDR_ANY, or 0.0.0.0).When no host port is -provided, one is dynamically allocated. The possible combinations of options for -TCP port are the following: - -.. code-block:: bash - - # Bind TCP port 8080 of the container to TCP port 80 on 127.0.0.1 of the host machine. - docker run -p 127.0.0.1:80:8080 - - # Bind TCP port 8080 of the container to a dynamically allocated TCP port on 127.0.0.1 of the host machine. - docker run -p 127.0.0.1::8080 - - # Bind TCP port 8080 of the container to TCP port 80 on all available interfaces of the host machine. - docker run -p 80:8080 - - # Bind TCP port 8080 of the container to a dynamically allocated TCP port on all available interfaces of the host machine. - docker run -p 8080 - -UDP ports can also be bound by adding a trailing ``/udp``. All the -combinations described for TCP work. Here is only one example: - -.. code-block:: bash - - # Bind UDP port 5353 of the container to UDP port 53 on 127.0.0.1 of the host machine. - docker run -p 127.0.0.1:53:5353/udp - -The command ``docker port`` lists the interface and port on the host -machine bound to a given container port. It is useful when using -dynamically allocated ports: - -.. 
code-block:: bash - - # Bind to a dynamically allocated port - docker run -p 127.0.0.1::8080 --name dyn-bound <image> <cmd> - - # Lookup the actual port - docker port dyn-bound 8080 - 127.0.0.1:49160 - - -Linking a container ------------------- - -Communication between two containers can also be established in a -docker-specific way called linking. - -To briefly present the concept of linking, let us consider two -containers: ``server``, containing the service, and ``client``, -accessing the service. Once ``server`` is running, ``client`` is -started and links to server. Linking sets environment variables in -``client`` giving it some information about ``server``. In this sense, -linking is a method of service discovery. - -Let us now get back to our topic of interest: communication between -the two containers. We mentioned that the tricky part about this -communication was that the IP address of ``server`` was not -fixed. Therefore, some of the environment variables are going to be -used to inform ``client`` about this IP address. This process, called -exposure, is possible because ``client`` is started after ``server`` -has been started. - -Here is a full example. On ``server``, the port of interest is -exposed. The exposure is done either through the ``--expose`` parameter -to the ``docker run`` command, or the ``EXPOSE`` build command in a -Dockerfile: - -.. code-block:: bash - - # Expose port 80 - docker run --expose 80 --name server <image> <cmd> - -The ``client`` then links to the ``server``: - -.. code-block:: bash - - # Link - docker run --name client --link server:linked-server <image> <cmd> - -``client`` locally refers to ``server`` as ``linked-server``. The -following environment variables, among others, are available on -``client``: - -.. code-block:: bash - - # The default protocol, ip, and port of the service running in the container - LINKED-SERVER_PORT=tcp://172.17.0.8:80 - - # A specific protocol, ip, and port of various services - LINKED-SERVER_PORT_80_TCP=tcp://172.17.0.8:80 - LINKED-SERVER_PORT_80_TCP_PROTO=tcp - LINKED-SERVER_PORT_80_TCP_ADDR=172.17.0.8 - LINKED-SERVER_PORT_80_TCP_PORT=80 - -This tells ``client`` that a service is running on port 80 of -``server`` and that ``server`` is accessible at the IP address -172.17.0.8. - -Note: Using the ``-p`` parameter also exposes the port. diff --git a/docs/sources/use/puppet.rst b/docs/sources/use/puppet.rst deleted file mode 100644 index 4183c14f18..0000000000 --- a/docs/sources/use/puppet.rst +++ /dev/null @@ -1,117 +0,0 @@ -:title: Puppet Usage -:description: Installing and using Puppet -:keywords: puppet, installation, usage, docker, documentation - -.. _install_using_puppet: - -Using Puppet ============= - -.. note:: - - Please note this is a community contributed installation path. The - only 'official' installation is using the :ref:`ubuntu_linux` - installation path. This version may sometimes be out of date. - -Requirements ------------ - -To use this guide you'll need a working installation of Puppet from -`Puppetlabs `_ . - -The module also currently uses the official PPA so it only works with Ubuntu. - -Installation ------------ - -The module is available on the `Puppet Forge -`_ and can be installed -using the built-in module tool. - -.. code-block:: bash - - puppet module install garethr/docker - -It can also be found on `GitHub -`_ if you would rather -download the source. - -Usage ----- - -The module provides a puppet class for installing Docker and two defined types -for managing images and containers. - -Installation ~~~~~~~~~~~~ - -.. 
code-block:: ruby - - include 'docker' - -Images -~~~~~~ - -The next step is probably to install a Docker image. For this, we have a -defined type which can be used like so: - -.. code-block:: ruby - - docker::image { 'ubuntu': } - -This is equivalent to running: - -.. code-block:: bash - - docker pull ubuntu - -Note that it will only be downloaded if an image of that name does -not already exist. This is downloading a large binary so on first -run can take a while. For that reason this define turns off the -default 5 minute timeout for the exec type. Note that you can also -remove images you no longer need with: - -.. code-block:: ruby - - docker::image { 'ubuntu': - ensure => 'absent', - } - -Containers -~~~~~~~~~~ - -Now you have an image where you can run commands within a container -managed by Docker. - -.. code-block:: ruby - - docker::run { 'helloworld': - image => 'ubuntu', - command => '/bin/sh -c "while true; do echo hello world; sleep 1; done"', - } - -This is equivalent to running the following command, but under upstart: - -.. code-block:: bash - - docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done" - -Run also contains a number of optional parameters: - -.. code-block:: ruby - - docker::run { 'helloworld': - image => 'ubuntu', - command => '/bin/sh -c "while true; do echo hello world; sleep 1; done"', - ports => ['4444', '4555'], - volumes => ['/var/lib/couchdb', '/var/log'], - volumes_from => '6446ea52fbc9', - memory_limit => 10485760, # bytes - username => 'example', - hostname => 'example.com', - env => ['FOO=BAR', 'FOO2=BAR2'], - dns => ['8.8.8.8', '8.8.4.4'], - } - -Note that ports, env, dns and volumes can be set with either a single string -or as above with an array of values. diff --git a/docs/sources/use/working_with_links_names.rst b/docs/sources/use/working_with_links_names.rst deleted file mode 100644 index 4acb6079c1..0000000000 --- a/docs/sources/use/working_with_links_names.rst +++ /dev/null @@ -1,132 +0,0 @@ -:title: Link Containers -:description: How to create and use both links and names -:keywords: Examples, Usage, links, linking, docker, documentation, examples, names, name, container naming - -.. _working_with_links_names: - -Link Containers -=============== - -From version 0.6.5 you are now able to ``name`` a container and -``link`` it to another container by referring to its name. This will -create a parent -> child relationship where the parent container can -see selected information about its child. - -.. _run_name: - -Container Naming ----------------- - -.. versionadded:: v0.6.5 - -You can now name your container by using the ``--name`` flag. If no -name is provided, Docker will automatically generate a name. You can -see this name using the ``docker ps`` command. - -.. code-block:: bash - - # format is "sudo docker run --name " - $ sudo docker run --name test ubuntu /bin/bash - - # the flag "-a" Show all containers. Only running containers are shown by default. - $ sudo docker ps -a - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 2522602a0d99 ubuntu:12.04 /bin/bash 14 seconds ago Exit 0 test - -.. _run_link: - -Links: service discovery for docker ------------------------------------ - -.. versionadded:: v0.6.5 - -Links allow containers to discover and securely communicate with each -other by using the flag ``--link name:alias``. Inter-container -communication can be disabled with the daemon flag -``--icc=false``. 
With this flag set to ``false``, Container A cannot -access Container B unless explicitly allowed via a link. This is a -huge win for securing your containers. When two containers are linked -together Docker creates a parent child relationship between the -containers. The parent container will be able to access information -via environment variables of the child such as name, exposed ports, IP -and other selected environment variables. - -When linking two containers Docker will use the exposed ports of the -container to create a secure tunnel for the parent to access. If a -database container only exposes port 8080 then the linked container -will only be allowed to access port 8080 and nothing else if -inter-container communication is set to false. - -For example, there is an image called ``crosbymichael/redis`` that exposes the -port 6379 and starts the Redis server. Let's name the container as ``redis`` -based on that image and run it as a daemon. - -.. code-block:: bash - - $ sudo docker run -d --name redis crosbymichael/redis - -We can issue all the commands that you would expect using the name -``redis``; start, stop, attach, using the name for our container. The -name also allows us to link other containers into this one. - -Next, we can start a new web application that has a dependency on -Redis and apply a link to connect both containers. If you noticed when -running our Redis server we did not use the ``-p`` flag to publish the -Redis port to the host system. Redis exposed port 6379 and this is all -we need to establish a link. - -.. code-block:: bash - - $ sudo docker run -t -i --link redis:db --name webapp ubuntu bash - -When you specified ``--link redis:db`` you are telling Docker to link -the container named ``redis`` into this new container with the alias -``db``. Environment variables are prefixed with the alias so that the -parent container can access network and environment information from -the containers that are linked into it. - -If we inspect the environment variables of the second container, we -would see all the information about the child container. - -.. code-block:: bash - - $ root@4c01db0b339c:/# env - - HOSTNAME=4c01db0b339c - DB_NAME=/webapp/db - TERM=xterm - DB_PORT=tcp://172.17.0.8:6379 - DB_PORT_6379_TCP=tcp://172.17.0.8:6379 - DB_PORT_6379_TCP_PROTO=tcp - DB_PORT_6379_TCP_ADDR=172.17.0.8 - DB_PORT_6379_TCP_PORT=6379 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PWD=/ - SHLVL=1 - HOME=/ - container=lxc - _=/usr/bin/env - root@4c01db0b339c:/# - -Accessing the network information along with the environment of the -child container allows us to easily connect to the Redis service on -the specific IP and port in the environment. - -.. note:: - These Environment variables are only set for the first process in - the container. Similarly, some daemons (such as ``sshd``) will - scrub them when spawning shells for connection. - - You can work around this by storing the initial ``env`` in a file, - or looking at ``/proc/1/environ``. - -Running ``docker ps`` shows the 2 containers, and the ``webapp/db`` -alias name for the Redis container. - -.. 
code-block:: bash - - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp - d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db - diff --git a/docs/sources/use/working_with_volumes.rst b/docs/sources/use/working_with_volumes.rst deleted file mode 100644 index d2f035dc84..0000000000 --- a/docs/sources/use/working_with_volumes.rst +++ /dev/null @@ -1,164 +0,0 @@ -:title: Share Directories via Volumes -:description: How to create and share volumes -:keywords: Examples, Usage, volume, docker, documentation, examples - -.. _volume_def: - -Share Directories via Volumes ============================= - -A *data volume* is a specially-designated directory within one or more -containers that bypasses the :ref:`ufs_def` to provide several useful -features for persistent or shared data: - -* **Data volumes can be shared and reused between containers.** This - is the feature that makes data volumes so powerful. You can use it - for anything from hot database upgrades to custom backup or - replication tools. See the example below. -* **Changes to a data volume are made directly**, without the overhead - of a copy-on-write mechanism. This is good for very large files. -* **Changes to a data volume will not be included at the next commit** - because they are not recorded as regular filesystem changes in the - top layer of the :ref:`ufs_def` -* **Volumes persist until no containers use them** as they are a reference - counted resource. The container does not need to be running to share its - volumes, but running it can help protect it against accidental removal - via ``docker rm``. - -Each container can have zero or more data volumes. - -.. versionadded:: v0.3.0 - -Getting Started ............... - -Using data volumes is as simple as adding a ``-v`` parameter to the ``docker run`` -command. The ``-v`` parameter can be used more than once in order to -create more volumes within the new container. To create a new container with -two new volumes:: - - $ docker run -v /var/volume1 -v /var/volume2 busybox true - -This command will create a new container with two new volumes; it exits instantly -(``true`` is pretty much the smallest, simplest program that you can run). Once -created you can mount its volumes in any other container using the -``--volumes-from`` option, irrespective of whether the container is running or not. - -Or, you can use the VOLUME instruction in a Dockerfile to add one or more new -volumes to any container created from that image:: - - # BUILD-USING: docker build -t data . - # RUN-USING: docker run --name DATA data - FROM busybox - VOLUME ["/var/volume1", "/var/volume2"] - CMD ["/bin/true"] - -Creating and mounting a Data Volume Container --------------------------------------------- - -If you have some persistent data that you want to share between containers, -or want to use from non-persistent containers, it's best to create a named -Data Volume Container, and then to mount the data from it. - -Create a named container with volumes to share (``/var/volume1`` and ``/var/volume2``):: - - $ docker run -v /var/volume1 -v /var/volume2 --name DATA busybox true - -Then mount those data volumes into your application containers:: - - $ docker run -t -i --rm --volumes-from DATA --name client1 ubuntu bash - -You can use multiple ``--volumes-from`` parameters to bring together multiple -data volumes from multiple containers.
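As a minimal sketch of combining volumes this way (the container names ``DATA1`` and ``DATA2`` below are hypothetical, not containers created earlier on this page)::

    # create two named data volume containers
    $ docker run -v /var/volume1 --name DATA1 busybox true
    $ docker run -v /var/volume2 --name DATA2 busybox true

    # repeat --volumes-from to mount the volumes of both into one application container
    $ docker run -t -i --rm --volumes-from DATA1 --volumes-from DATA2 ubuntu bash

Repeating ``--volumes-from`` simply merges the volume sets, so ``/var/volume1`` and ``/var/volume2`` both appear inside the new container.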
- -Interestingly, you can mount the volumes that came from the ``DATA`` container in -yet another container via the ``client1`` middleman container:: - - $ docker run -t -i --rm --volumes-from client1 --name client2 ubuntu bash - -This allows you to abstract the actual data source from users of that data, -similar to :ref:`ambassador_pattern_linking `. - -If you remove containers that mount volumes, including the initial DATA container, -or the middleman, the volumes will not be deleted until there are no containers still -referencing those volumes. This allows you to upgrade, or effectively migrate data volumes -between containers. - -Mount a Host Directory as a Container Volume: --------------------------------------------- - -:: - - -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. - -You must specify an absolute path for ``host-dir``. -If ``host-dir`` is missing from the command, then docker creates a new volume. -If ``host-dir`` is present but points to a non-existent directory on the host, -Docker will automatically create this directory and use it as the source of the -bind-mount. - -Note that this is not available from a Dockerfile due to the portability and -sharing purpose of it. The ``host-dir`` volumes are entirely host-dependent and -might not work on any other machine. - -For example:: - - sudo docker run -t -i -v /var/logs:/var/host_logs:ro ubuntu bash - -The command above mounts the host directory ``/var/logs`` into the -container with read-only permissions as ``/var/host_logs``. - -.. versionadded:: v0.5.0 - - -Note for OS/X users and remote daemon users: -------------------------------------------- - -OS/X users run ``boot2docker`` to create a minimalist virtual machine running the docker daemon. That -virtual machine then launches docker commands on behalf of the OS/X command line. This means that ``host -directories`` refer to directories in the ``boot2docker`` virtual machine, not the OS/X filesystem. - -Similarly, whenever the docker daemon is on a remote machine, the ``host directories`` always refer to directories on the daemon's machine. - -Backup, restore, or migrate data volumes ---------------------------------------- - -You cannot back up volumes using ``docker export``, ``docker save`` and ``docker cp`` -because they are external to images. -Instead you can use ``--volumes-from`` to start a new container that can access the -data-container's volume. 
For example:: - - $ sudo docker run --rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data - -* ``--rm`` - remove the container when it exits -* ``--volumes-from DATA`` - attach to the volumes shared by the ``DATA`` container -* ``-v $(pwd):/backup`` - bind mount the current directory into the container; to write the tar file to -* ``busybox`` - a small simpler image - good for quick maintenance -* ``tar cvf /backup/backup.tar /data`` - creates an uncompressed tar file of all the files in the ``/data`` directory - -Then to restore to the same container, or another that you've made elsewhere:: - - # create a new data container - $ sudo docker run -v /data --name DATA2 busybox true - # untar the backup files into the new container's data volume - $ sudo docker run --rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar - data/ - data/sven.txt - # compare to the original container - $ sudo docker run --rm --volumes-from DATA -v `pwd`:/backup busybox ls /data - sven.txt - - -You can use the basic techniques above to automate backup, migration and restore -testing using your preferred tools. - -Known Issues -............ - -* :issue:`2702`: "lxc-start: Permission denied - failed to mount" - could indicate a permissions problem with AppArmor. Please see the - issue for a workaround. -* :issue:`2528`: the busybox container is used to make the resulting container as small and - simple as possible - whenever you need to interact with the data in the volume - you mount it into another container. diff --git a/docs/sources/use/workingwithrepository.rst b/docs/sources/use/workingwithrepository.rst deleted file mode 100644 index c126361f8c..0000000000 --- a/docs/sources/use/workingwithrepository.rst +++ /dev/null @@ -1,256 +0,0 @@ -:title: Share Images via Repositories -:description: Repositories allow users to share images. -:keywords: repo, repositories, usage, pull image, push image, image, documentation - -.. _working_with_the_repository: - -Share Images via Repositories -============================= - -A *repository* is a shareable collection of tagged :ref:`images` -that together create the file systems for containers. The -repository's name is a label that indicates the provenance of the -repository, i.e. who created it and where the original copy is -located. - -You can find one or more repositories hosted on a *registry*. There -can be an implicit or explicit host name as part of the repository -tag. The implicit registry is located at ``index.docker.io``, the home -of "top-level" repositories and the Central Index. This registry may -also include public "user" repositories. - -Docker is not only a tool for creating and managing your own -:ref:`containers ` -- **Docker is also a tool for -sharing**. The Docker project provides a Central Registry to host -public repositories, namespaced by user, and a Central Index which -provides user authentication and search over all the public -repositories. You can host your own Registry too! Docker acts as a -client for these services via ``docker search, pull, login`` and -``push``. - -Local Repositories ------------------- - -Docker images which have been created and labeled on your local Docker server -need to be pushed to a Public or Private registry to be shared. - -.. 
_using_public_repositories: - -Public Repositories -------------------- - -There are two types of public repositories: *top-level* repositories -which are controlled by the Docker team, and *user* repositories -created by individual contributors. Anyone can read from these -repositories -- they really help people get started quickly! You could -also use :ref:`using_private_repositories` if you need to keep control -of who accesses your images, but we will only refer to public -repositories in these examples. - -* Top-level repositories can easily be recognized by **not** having a - ``/`` (slash) in their name. These repositories can generally be - trusted. -* User repositories always come in the form of - ``/``. This is what your published images will - look like if you push to the public Central Registry. -* Only the authenticated user can push to their *username* namespace - on the Central Registry. -* User images are not checked, it is therefore up to you whether or - not you trust the creator of this image. - -.. _searching_central_index: - -Find Public Images on the Central Index ---------------------------------------- - -You can search the Central Index `online `_ -or using the command line interface. Searching can find images by name, user -name or description: - -.. code-block:: bash - - $ sudo docker help search - Usage: docker search NAME - - Search the docker index for images - - --no-trunc=false: Don't truncate output - $ sudo docker search centos - Found 25 results matching your query ("centos") - NAME DESCRIPTION - centos - slantview/centos-chef-solo CentOS 6.4 with chef-solo. - ... - -There you can see two example results: ``centos`` and -``slantview/centos-chef-solo``. The second result shows that it comes -from the public repository of a user, ``slantview/``, while the first -result (``centos``) doesn't explicitly list a repository so it comes -from the trusted Central Repository. The ``/`` character separates a -user's repository and the image name. - -Once you have found the image name, you can download it: - -.. code-block:: bash - - # sudo docker pull - $ sudo docker pull centos - Pulling repository centos - 539c0211cd76: Download complete - -What can you do with that image? Check out the :ref:`example_list` -and, when you're ready with your own image, come back here to learn -how to share it. - -Contributing to the Central Registry ------------------------------------- - -Anyone can pull public images from the Central Registry, but if you -would like to share one of your own images, then you must register a -unique user name first. You can create your username and login on the -`central Docker Index online -`_, or by running - -.. code-block:: bash - - sudo docker login - -This will prompt you for a username, which will become a public -namespace for your public repositories. - -If your username is available then ``docker`` will also prompt you to -enter a password and your e-mail address. It will then automatically -log you in. Now you're ready to commit and push your own images! - -.. _container_commit: - -Committing a Container to a Named Image ---------------------------------------- - -When you make changes to an existing image, those changes get saved to -a container's file system. You can then promote that container to -become an image by making a ``commit``. 
In addition to converting the -container to an image, this is also your opportunity to name the -image, specifically a name that includes your user name from the -Central Docker Index (as you did a ``login`` above) and a meaningful -name for the image. - -.. code-block:: bash - - # format is "sudo docker commit <container_id> <username>/<imagename>" - $ sudo docker commit $CONTAINER_ID myname/kickassapp - -.. _image_push: - -Pushing a repository to its registry ------------------------------------- - -In order to push a repository to its registry, you need to have named an image -or committed your container to a named image (see above). - -Now you can push this repository to the registry designated by its name -or tag. - -.. code-block:: bash - - # format is "docker push <username>/<repo_name>" - $ sudo docker push myname/kickassapp - -.. _using_private_repositories: - -Trusted Builds -------------- - -Trusted Builds automate the building and updating of images from GitHub, directly -on ``docker.io`` servers. It works by adding a commit hook to your selected repository, -triggering a build and update when you push a commit. - -To set up a trusted build +++++++++++++++++++++++++ - -#. Create a `Docker Index account `_ and log in. -#. Link your GitHub account through the ``Link Accounts`` menu. -#. `Configure a Trusted build `_. -#. Pick a GitHub project that has a ``Dockerfile`` that you want to build. -#. Pick the branch you want to build (the default is the ``master`` branch). -#. Give the Trusted Build a name. -#. Assign an optional Docker tag to the Build. -#. Specify where the ``Dockerfile`` is located. The default is ``/``. - -Once the Trusted Build is configured, it will automatically trigger a build, and -in a few minutes, if there are no errors, you will see your new trusted build -on the Docker Index. It will stay in sync with your GitHub repo until you -deactivate the Trusted Build. - -If you want to see the status of your Trusted Builds you can go to your -`Trusted Builds page `_ on the Docker Index, -and it will show you the status of your builds and the build history. - -Once you've created a Trusted Build you can deactivate or delete it. You cannot, -however, push to a Trusted Build with the ``docker push`` command. You can only -manage it by committing code to your GitHub repository. - -You can create multiple Trusted Builds per repository and configure them to -point to specific ``Dockerfile``'s or Git branches. - -Private Registry ---------------- - -Private registries and private shared repositories are -only possible by hosting `your own registry -`_. To push or pull to a -repository on your own registry, you must prefix the tag with the -address of the registry's host (a ``.`` or ``:`` is used to identify a host), -like this: - -.. code-block:: bash - - # Tag to create a repository with the full registry location. - # The location (e.g. localhost.localdomain:5000) becomes - # a permanent part of the repository name - sudo docker tag 0u812deadbeef localhost.localdomain:5000/repo_name - - # Push the new repository to its home location on localhost - sudo docker push localhost.localdomain:5000/repo_name - -Once a repository has your registry's host name as part of the tag, -you can push and pull it like any other repository, but it will -**not** be searchable (or indexed at all) in the Central Index, and -there will be no user name checking performed. Your registry will -function completely independently from the Central Index. - -.. raw:: html - - - -.. 
seealso:: `Docker Blog: How to use your own registry - `_ - -Authentication file -------------------- - -The authentication is stored in a json file, ``.dockercfg`` located in your -home directory. It supports multiple registry urls. - -``docker login`` will create the "https://index.docker.io/v1/" key. - -``docker login https://my-registry.com`` will create the "https://my-registry.com" key. - -For example: - -.. code-block:: json - - { - "https://index.docker.io/v1/": { - "auth": "xXxXxXxXxXx=", - "email": "email@example.com" - }, - "https://my-registry.com": { - "auth": "XxXxXxXxXxX=", - "email": "email@my-registry.com" - } - } - -The ``auth`` field represents ``base64(:)`` diff --git a/docs/theme/docker/layout.html b/docs/theme/docker/layout.html deleted file mode 100755 index 0dac9e0680..0000000000 --- a/docs/theme/docker/layout.html +++ /dev/null @@ -1,78 +0,0 @@ - - - - - - - {{ meta['title'] if meta and meta['title'] else title }} - Docker Documentation - - - - - - - {%- set url_root = pathto('', 1) %} - {%- if url_root == '#' %}{% set url_root = '' %}{% endif %} - - {%- if current_version == 'latest' %} - {% set github_tag = 'master' %} - {% else %} - {% set github_tag = current_version %} - {% endif %} - - - - {%- set css_files = css_files + ['_static/css/bootstrap.css'] %} - {%- set css_files = css_files + ['_static/pygments.css'] %} - {%- set css_files = css_files + ['_static/css/main.css'] %} - - {%- set script_files = - ['//code.jquery.com/jquery-1.10.1.min.js'] - + ['//fonts.googleapis.com/css?family=Cabin:400,700,400italic'] - %} - - {# - This part is hopefully complex because things like |cut '/index/' are not available in Sphinx jinja - and will make it crash. (and we need index/ out. - #} - - - {%- for cssfile in css_files %} - - {%- endfor %} - - {%- for scriptfile in script_files if scriptfile != '_static/jquery.js' %} - - {%- endfor %} - - {%- block extrahead %}{% endblock %} - - - - - - -
- - -
- {% block body %}{% endblock %} -
- -
- - - diff --git a/docs/theme/docker/redirect_build.html b/docs/theme/docker/redirect_build.html deleted file mode 100644 index 1f26fc3aaa..0000000000 --- a/docs/theme/docker/redirect_build.html +++ /dev/null @@ -1,12 +0,0 @@ - - - - Page Moved - - - - -This page has moved. Perhaps you should visit the Builder page - - - diff --git a/docs/theme/docker/redirect_home.html b/docs/theme/docker/redirect_home.html deleted file mode 100644 index 109239f819..0000000000 --- a/docs/theme/docker/redirect_home.html +++ /dev/null @@ -1,12 +0,0 @@ - - - - Page Moved - - - - -This page has moved. Perhaps you should visit the Documentation Homepage - - - diff --git a/docs/theme/docker/static/css/bootstrap.css b/docs/theme/docker/static/css/bootstrap.css deleted file mode 100755 index b255056927..0000000000 --- a/docs/theme/docker/static/css/bootstrap.css +++ /dev/null @@ -1,6158 +0,0 @@ -/*! - * Bootstrap v2.3.0 - * - * Copyright 2012 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world @twitter by @mdo and @fat. - */ - -.clearfix { - *zoom: 1; -} - -.clearfix:before, -.clearfix:after { - display: table; - line-height: 0; - content: ""; -} - -.clearfix:after { - clear: both; -} - -.hide-text { - font: 0/0 a; - color: transparent; - text-shadow: none; - background-color: transparent; - border: 0; -} - -.input-block-level { - display: block; - width: 100%; - min-height: 30px; - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} - -article, -aside, -details, -figcaption, -figure, -footer, -header, -hgroup, -nav, -section { - display: block; -} - -audio, -canvas, -video { - display: inline-block; - *display: inline; - *zoom: 1; -} - -audio:not([controls]) { - display: none; -} - -html { - font-size: 100%; - -webkit-text-size-adjust: 100%; - -ms-text-size-adjust: 100%; -} - -a:focus { - outline: thin dotted #333; - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} - -a:hover, -a:active { - outline: 0; -} - -sub, -sup { - position: relative; - font-size: 75%; - line-height: 0; - vertical-align: baseline; -} - -sup { - top: -0.5em; -} - -sub { - bottom: -0.25em; -} - -img { - width: auto\9; - height: auto; - max-width: 100%; - vertical-align: middle; - border: 0; - -ms-interpolation-mode: bicubic; -} - -#map_canvas img, -.google-maps img { - max-width: none; -} - -button, -input, -select, -textarea { - margin: 0; - font-size: 100%; - vertical-align: middle; -} - -button, -input { - *overflow: visible; - line-height: normal; -} - -button::-moz-focus-inner, -input::-moz-focus-inner { - padding: 0; - border: 0; -} - -button, -html input[type="button"], -input[type="reset"], -input[type="submit"] { - cursor: pointer; - -webkit-appearance: button; -} - -label, -select, -button, -input[type="button"], -input[type="reset"], -input[type="submit"], -input[type="radio"], -input[type="checkbox"] { - cursor: pointer; -} - -input[type="search"] { - -webkit-box-sizing: content-box; - -moz-box-sizing: content-box; - box-sizing: content-box; - -webkit-appearance: textfield; -} - -input[type="search"]::-webkit-search-decoration, -input[type="search"]::-webkit-search-cancel-button { - -webkit-appearance: none; -} - -textarea { - overflow: auto; - vertical-align: top; -} - -@media print { - * { - color: #000 !important; - text-shadow: none !important; - background: transparent !important; - box-shadow: none !important; - } - a, - a:visited { - text-decoration: 
underline; - } - a[href]:after { - content: " (" attr(href) ")"; - } - abbr[title]:after { - content: " (" attr(title) ")"; - } - .ir a:after, - a[href^="javascript:"]:after, - a[href^="#"]:after { - content: ""; - } - pre, - blockquote { - border: 1px solid #999; - page-break-inside: avoid; - } - thead { - display: table-header-group; - } - tr, - img { - page-break-inside: avoid; - } - img { - max-width: 100% !important; - } - @page { - margin: 0.5cm; - } - p, - h2, - h3 { - orphans: 3; - widows: 3; - } - h2, - h3 { - page-break-after: avoid; - } -} - -body { - margin: 0; - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 14px; - line-height: 20px; - color: #333333; - background-color: #ffffff; -} - -a { - color: #0088cc; - text-decoration: none; -} - -a:hover, -a:focus { - color: #005580; - text-decoration: underline; -} - -.img-rounded { - -webkit-border-radius: 6px; - -moz-border-radius: 6px; - border-radius: 6px; -} - -.img-polaroid { - padding: 4px; - background-color: #fff; - border: 1px solid #ccc; - border: 1px solid rgba(0, 0, 0, 0.2); - -webkit-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); - -moz-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); - box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); -} - -.img-circle { - -webkit-border-radius: 500px; - -moz-border-radius: 500px; - border-radius: 500px; -} - -.row { - margin-left: -20px; - *zoom: 1; -} - -.row:before, -.row:after { - display: table; - line-height: 0; - content: ""; -} - -.row:after { - clear: both; -} - -[class*="span"] { - float: left; - min-height: 1px; - margin-left: 20px; -} - -.container, -.navbar-static-top .container, -.navbar-fixed-top .container, -.navbar-fixed-bottom .container { - width: 940px; -} - -.span12 { - width: 940px; -} - -.span11 { - width: 860px; -} - -.span10 { - width: 780px; -} - -.span9 { - width: 700px; -} - -.span8 { - width: 620px; -} - -.span7 { - width: 540px; -} - -.span6 { - width: 460px; -} - -.span5 { - width: 380px; -} - -.span4 { - width: 300px; -} - -.span3 { - width: 220px; -} - -.span2 { - width: 140px; -} - -.span1 { - width: 60px; -} - -.offset12 { - margin-left: 980px; -} - -.offset11 { - margin-left: 900px; -} - -.offset10 { - margin-left: 820px; -} - -.offset9 { - margin-left: 740px; -} - -.offset8 { - margin-left: 660px; -} - -.offset7 { - margin-left: 580px; -} - -.offset6 { - margin-left: 500px; -} - -.offset5 { - margin-left: 420px; -} - -.offset4 { - margin-left: 340px; -} - -.offset3 { - margin-left: 260px; -} - -.offset2 { - margin-left: 180px; -} - -.offset1 { - margin-left: 100px; -} - -.row-fluid { - width: 100%; - *zoom: 1; -} - -.row-fluid:before, -.row-fluid:after { - display: table; - line-height: 0; - content: ""; -} - -.row-fluid:after { - clear: both; -} - -.row-fluid [class*="span"] { - display: block; - float: left; - width: 100%; - min-height: 30px; - margin-left: 2.127659574468085%; - *margin-left: 2.074468085106383%; - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} - -.row-fluid [class*="span"]:first-child { - margin-left: 0; -} - -.row-fluid .controls-row [class*="span"] + [class*="span"] { - margin-left: 2.127659574468085%; -} - -.row-fluid .span12 { - width: 100%; - *width: 99.94680851063829%; -} - -.row-fluid .span11 { - width: 91.48936170212765%; - *width: 91.43617021276594%; -} - -.row-fluid .span10 { - width: 82.97872340425532%; - *width: 82.92553191489361%; -} - -.row-fluid .span9 { - width: 74.46808510638297%; - *width: 74.41489361702126%; -} - -.row-fluid .span8 { - width: 
65.95744680851064%; - *width: 65.90425531914893%; -} - -.row-fluid .span7 { - width: 57.44680851063829%; - *width: 57.39361702127659%; -} - -.row-fluid .span6 { - width: 48.93617021276595%; - *width: 48.88297872340425%; -} - -.row-fluid .span5 { - width: 40.42553191489362%; - *width: 40.37234042553192%; -} - -.row-fluid .span4 { - width: 31.914893617021278%; - *width: 31.861702127659576%; -} - -.row-fluid .span3 { - width: 23.404255319148934%; - *width: 23.351063829787233%; -} - -.row-fluid .span2 { - width: 14.893617021276595%; - *width: 14.840425531914894%; -} - -.row-fluid .span1 { - width: 6.382978723404255%; - *width: 6.329787234042553%; -} - -.row-fluid .offset12 { - margin-left: 104.25531914893617%; - *margin-left: 104.14893617021275%; -} - -.row-fluid .offset12:first-child { - margin-left: 102.12765957446808%; - *margin-left: 102.02127659574467%; -} - -.row-fluid .offset11 { - margin-left: 95.74468085106382%; - *margin-left: 95.6382978723404%; -} - -.row-fluid .offset11:first-child { - margin-left: 93.61702127659574%; - *margin-left: 93.51063829787232%; -} - -.row-fluid .offset10 { - margin-left: 87.23404255319149%; - *margin-left: 87.12765957446807%; -} - -.row-fluid .offset10:first-child { - margin-left: 85.1063829787234%; - *margin-left: 84.99999999999999%; -} - -.row-fluid .offset9 { - margin-left: 78.72340425531914%; - *margin-left: 78.61702127659572%; -} - -.row-fluid .offset9:first-child { - margin-left: 76.59574468085106%; - *margin-left: 76.48936170212764%; -} - -.row-fluid .offset8 { - margin-left: 70.2127659574468%; - *margin-left: 70.10638297872339%; -} - -.row-fluid .offset8:first-child { - margin-left: 68.08510638297872%; - *margin-left: 67.9787234042553%; -} - -.row-fluid .offset7 { - margin-left: 61.70212765957446%; - *margin-left: 61.59574468085106%; -} - -.row-fluid .offset7:first-child { - margin-left: 59.574468085106375%; - *margin-left: 59.46808510638297%; -} - -.row-fluid .offset6 { - margin-left: 53.191489361702125%; - *margin-left: 53.085106382978715%; -} - -.row-fluid .offset6:first-child { - margin-left: 51.063829787234035%; - *margin-left: 50.95744680851063%; -} - -.row-fluid .offset5 { - margin-left: 44.68085106382979%; - *margin-left: 44.57446808510638%; -} - -.row-fluid .offset5:first-child { - margin-left: 42.5531914893617%; - *margin-left: 42.4468085106383%; -} - -.row-fluid .offset4 { - margin-left: 36.170212765957444%; - *margin-left: 36.06382978723405%; -} - -.row-fluid .offset4:first-child { - margin-left: 34.04255319148936%; - *margin-left: 33.93617021276596%; -} - -.row-fluid .offset3 { - margin-left: 27.659574468085104%; - *margin-left: 27.5531914893617%; -} - -.row-fluid .offset3:first-child { - margin-left: 25.53191489361702%; - *margin-left: 25.425531914893618%; -} - -.row-fluid .offset2 { - margin-left: 19.148936170212764%; - *margin-left: 19.04255319148936%; -} - -.row-fluid .offset2:first-child { - margin-left: 17.02127659574468%; - *margin-left: 16.914893617021278%; -} - -.row-fluid .offset1 { - margin-left: 10.638297872340425%; - *margin-left: 10.53191489361702%; -} - -.row-fluid .offset1:first-child { - margin-left: 8.51063829787234%; - *margin-left: 8.404255319148938%; -} - -[class*="span"].hide, -.row-fluid [class*="span"].hide { - display: none; -} - -[class*="span"].pull-right, -.row-fluid [class*="span"].pull-right { - float: right; -} - -.container { - margin-right: auto; - margin-left: auto; - *zoom: 1; -} - -.container:before, -.container:after { - display: table; - line-height: 0; - content: ""; -} - -.container:after { - 
clear: both; -} - -.container-fluid { - padding-right: 20px; - padding-left: 20px; - *zoom: 1; -} - -.container-fluid:before, -.container-fluid:after { - display: table; - line-height: 0; - content: ""; -} - -.container-fluid:after { - clear: both; -} - -p { - margin: 0 0 10px; -} - -.lead { - margin-bottom: 20px; - font-size: 21px; - font-weight: 200; - line-height: 30px; -} - -small { - font-size: 85%; -} - -strong { - font-weight: bold; -} - -em { - font-style: italic; -} - -cite { - font-style: normal; -} - -.muted { - color: #999999; -} - -a.muted:hover, -a.muted:focus { - color: #808080; -} - -.text-warning { - color: #c09853; -} - -a.text-warning:hover, -a.text-warning:focus { - color: #a47e3c; -} - -.text-error { - color: #b94a48; -} - -a.text-error:hover, -a.text-error:focus { - color: #953b39; -} - -.text-info { - color: #3a87ad; -} - -a.text-info:hover, -a.text-info:focus { - color: #2d6987; -} - -.text-success { - color: #468847; -} - -a.text-success:hover, -a.text-success:focus { - color: #356635; -} - -.text-left { - text-align: left; -} - -.text-right { - text-align: right; -} - -.text-center { - text-align: center; -} - -h1, -h2, -h3, -h4, -h5, -h6 { - margin: 10px 0; - font-family: inherit; - font-weight: bold; - line-height: 20px; - color: inherit; - text-rendering: optimizelegibility; -} - -h1 small, -h2 small, -h3 small, -h4 small, -h5 small, -h6 small { - font-weight: normal; - line-height: 1; - color: #999999; -} - -h1, -h2, -h3 { - line-height: 40px; -} - -h1 { - font-size: 38.5px; -} - -h2 { - font-size: 31.5px; -} - -h3 { - font-size: 24.5px; -} - -h4 { - font-size: 17.5px; -} - -h5 { - font-size: 14px; -} - -h6 { - font-size: 11.9px; -} - -h1 small { - font-size: 24.5px; -} - -h2 small { - font-size: 17.5px; -} - -h3 small { - font-size: 14px; -} - -h4 small { - font-size: 14px; -} - -.page-header { - padding-bottom: 9px; - margin: 20px 0 30px; - border-bottom: 1px solid #eeeeee; -} - -ul, -ol { - padding: 0; - margin: 0 0 10px 25px; -} - -ul ul, -ul ol, -ol ol, -ol ul { - margin-bottom: 0; -} - -li { - line-height: 20px; -} - -ul.unstyled, -ol.unstyled { - margin-left: 0; - list-style: none; -} - -ul.inline, -ol.inline { - margin-left: 0; - list-style: none; -} - -ul.inline > li, -ol.inline > li { - display: inline-block; - *display: inline; - padding-right: 5px; - padding-left: 5px; - *zoom: 1; -} - -dl { - margin-bottom: 20px; -} - -dt, -dd { - line-height: 20px; -} - -dt { - font-weight: bold; -} - -dd { - margin-left: 10px; -} - -.dl-horizontal { - *zoom: 1; -} - -.dl-horizontal:before, -.dl-horizontal:after { - display: table; - line-height: 0; - content: ""; -} - -.dl-horizontal:after { - clear: both; -} - -.dl-horizontal dt { - float: left; - width: 160px; - overflow: hidden; - clear: left; - text-align: right; - text-overflow: ellipsis; - white-space: nowrap; -} - -.dl-horizontal dd { - margin-left: 180px; -} - -hr { - margin: 20px 0; - border: 0; - border-top: 1px solid #eeeeee; - border-bottom: 1px solid #ffffff; -} - -abbr[title], -abbr[data-original-title] { - cursor: help; - border-bottom: 1px dotted #999999; -} - -abbr.initialism { - font-size: 90%; - text-transform: uppercase; -} - -blockquote { - padding: 0 0 0 15px; - margin: 0 0 20px; - border-left: 5px solid #eeeeee; -} - -blockquote p { - margin-bottom: 0; - font-size: 17.5px; - font-weight: 300; - line-height: 1.25; -} - -blockquote small { - display: block; - line-height: 20px; - color: #999999; -} - -blockquote small:before { - content: '\2014 \00A0'; -} - -blockquote.pull-right { - float: 
right; - padding-right: 15px; - padding-left: 0; - border-right: 5px solid #eeeeee; - border-left: 0; -} - -blockquote.pull-right p, -blockquote.pull-right small { - text-align: right; -} - -blockquote.pull-right small:before { - content: ''; -} - -blockquote.pull-right small:after { - content: '\00A0 \2014'; -} - -q:before, -q:after, -blockquote:before, -blockquote:after { - content: ""; -} - -address { - display: block; - margin-bottom: 20px; - font-style: normal; - line-height: 20px; -} - -code, -pre { - padding: 0 3px 2px; - font-family: Monaco, Menlo, Consolas, "Courier New", monospace; - font-size: 12px; - color: #333333; - -webkit-border-radius: 3px; - -moz-border-radius: 3px; - border-radius: 3px; -} - -code { - padding: 2px 4px; - color: #d14; - white-space: nowrap; - background-color: #f7f7f9; - border: 1px solid #e1e1e8; -} - -pre { - display: block; - padding: 9.5px; - margin: 0 0 10px; - font-size: 13px; - line-height: 20px; - word-break: break-all; - word-wrap: break-word; - white-space: pre; - white-space: pre-wrap; - background-color: #f5f5f5; - border: 1px solid #ccc; - border: 1px solid rgba(0, 0, 0, 0.15); - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; -} - -pre.prettyprint { - margin-bottom: 20px; -} - -pre code { - padding: 0; - color: inherit; - white-space: pre; - white-space: pre-wrap; - background-color: transparent; - border: 0; -} - -.pre-scrollable { - max-height: 340px; - overflow-y: scroll; -} - -form { - margin: 0 0 20px; -} - -fieldset { - padding: 0; - margin: 0; - border: 0; -} - -legend { - display: block; - width: 100%; - padding: 0; - margin-bottom: 20px; - font-size: 21px; - line-height: 40px; - color: #333333; - border: 0; - border-bottom: 1px solid #e5e5e5; -} - -legend small { - font-size: 15px; - color: #999999; -} - -label, -input, -button, -select, -textarea { - font-size: 14px; - font-weight: normal; - line-height: 20px; -} - -input, -button, -select, -textarea { - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; -} - -label { - display: block; - margin-bottom: 5px; -} - -select, -textarea, -input[type="text"], -input[type="password"], -input[type="datetime"], -input[type="datetime-local"], -input[type="date"], -input[type="month"], -input[type="time"], -input[type="week"], -input[type="number"], -input[type="email"], -input[type="url"], -input[type="search"], -input[type="tel"], -input[type="color"], -.uneditable-input { - display: inline-block; - height: 20px; - padding: 4px 6px; - margin-bottom: 10px; - font-size: 14px; - line-height: 20px; - color: #555555; - vertical-align: middle; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; -} - -input, -textarea, -.uneditable-input { - width: 206px; -} - -textarea { - height: auto; -} - -textarea, -input[type="text"], -input[type="password"], -input[type="datetime"], -input[type="datetime-local"], -input[type="date"], -input[type="month"], -input[type="time"], -input[type="week"], -input[type="number"], -input[type="email"], -input[type="url"], -input[type="search"], -input[type="tel"], -input[type="color"], -.uneditable-input { - background-color: #ffffff; - border: 1px solid #cccccc; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - -webkit-transition: border linear 0.2s, box-shadow linear 0.2s; - -moz-transition: border linear 0.2s, box-shadow linear 0.2s; - -o-transition: border linear 0.2s, box-shadow 
linear 0.2s; - transition: border linear 0.2s, box-shadow linear 0.2s; -} - -textarea:focus, -input[type="text"]:focus, -input[type="password"]:focus, -input[type="datetime"]:focus, -input[type="datetime-local"]:focus, -input[type="date"]:focus, -input[type="month"]:focus, -input[type="time"]:focus, -input[type="week"]:focus, -input[type="number"]:focus, -input[type="email"]:focus, -input[type="url"]:focus, -input[type="search"]:focus, -input[type="tel"]:focus, -input[type="color"]:focus, -.uneditable-input:focus { - border-color: rgba(82, 168, 236, 0.8); - outline: 0; - outline: thin dotted \9; - /* IE6-9 */ - - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 8px rgba(82, 168, 236, 0.6); - -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 8px rgba(82, 168, 236, 0.6); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 8px rgba(82, 168, 236, 0.6); -} - -input[type="radio"], -input[type="checkbox"] { - margin: 4px 0 0; - margin-top: 1px \9; - *margin-top: 0; - line-height: normal; -} - -input[type="file"], -input[type="image"], -input[type="submit"], -input[type="reset"], -input[type="button"], -input[type="radio"], -input[type="checkbox"] { - width: auto; -} - -select, -input[type="file"] { - height: 30px; - /* In IE7, the height of the select element cannot be changed by height, only font-size */ - - *margin-top: 4px; - /* For IE7, add top margin to align select with labels */ - - line-height: 30px; -} - -select { - width: 220px; - background-color: #ffffff; - border: 1px solid #cccccc; -} - -select[multiple], -select[size] { - height: auto; -} - -select:focus, -input[type="file"]:focus, -input[type="radio"]:focus, -input[type="checkbox"]:focus { - outline: thin dotted #333; - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} - -.uneditable-input, -.uneditable-textarea { - color: #999999; - cursor: not-allowed; - background-color: #fcfcfc; - border-color: #cccccc; - -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.025); - -moz-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.025); - box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.025); -} - -.uneditable-input { - overflow: hidden; - white-space: nowrap; -} - -.uneditable-textarea { - width: auto; - height: auto; -} - -input:-moz-placeholder, -textarea:-moz-placeholder { - color: #999999; -} - -input:-ms-input-placeholder, -textarea:-ms-input-placeholder { - color: #999999; -} - -input::-webkit-input-placeholder, -textarea::-webkit-input-placeholder { - color: #999999; -} - -.radio, -.checkbox { - min-height: 20px; - padding-left: 20px; -} - -.radio input[type="radio"], -.checkbox input[type="checkbox"] { - float: left; - margin-left: -20px; -} - -.controls > .radio:first-child, -.controls > .checkbox:first-child { - padding-top: 5px; -} - -.radio.inline, -.checkbox.inline { - display: inline-block; - padding-top: 5px; - margin-bottom: 0; - vertical-align: middle; -} - -.radio.inline + .radio.inline, -.checkbox.inline + .checkbox.inline { - margin-left: 10px; -} - -.input-mini { - width: 60px; -} - -.input-small { - width: 90px; -} - -.input-medium { - width: 150px; -} - -.input-large { - width: 210px; -} - -.input-xlarge { - width: 270px; -} - -.input-xxlarge { - width: 530px; -} - -input[class*="span"], -select[class*="span"], -textarea[class*="span"], -.uneditable-input[class*="span"], -.row-fluid input[class*="span"], -.row-fluid select[class*="span"], -.row-fluid textarea[class*="span"], -.row-fluid .uneditable-input[class*="span"] { - float: none; - margin-left: 0; -} - -.input-append 
input[class*="span"], -.input-append .uneditable-input[class*="span"], -.input-prepend input[class*="span"], -.input-prepend .uneditable-input[class*="span"], -.row-fluid input[class*="span"], -.row-fluid select[class*="span"], -.row-fluid textarea[class*="span"], -.row-fluid .uneditable-input[class*="span"], -.row-fluid .input-prepend [class*="span"], -.row-fluid .input-append [class*="span"] { - display: inline-block; -} - -input, -textarea, -.uneditable-input { - margin-left: 0; -} - -.controls-row [class*="span"] + [class*="span"] { - margin-left: 20px; -} - -input.span12, -textarea.span12, -.uneditable-input.span12 { - width: 926px; -} - -input.span11, -textarea.span11, -.uneditable-input.span11 { - width: 846px; -} - -input.span10, -textarea.span10, -.uneditable-input.span10 { - width: 766px; -} - -input.span9, -textarea.span9, -.uneditable-input.span9 { - width: 686px; -} - -input.span8, -textarea.span8, -.uneditable-input.span8 { - width: 606px; -} - -input.span7, -textarea.span7, -.uneditable-input.span7 { - width: 526px; -} - -input.span6, -textarea.span6, -.uneditable-input.span6 { - width: 446px; -} - -input.span5, -textarea.span5, -.uneditable-input.span5 { - width: 366px; -} - -input.span4, -textarea.span4, -.uneditable-input.span4 { - width: 286px; -} - -input.span3, -textarea.span3, -.uneditable-input.span3 { - width: 206px; -} - -input.span2, -textarea.span2, -.uneditable-input.span2 { - width: 126px; -} - -input.span1, -textarea.span1, -.uneditable-input.span1 { - width: 46px; -} - -.controls-row { - *zoom: 1; -} - -.controls-row:before, -.controls-row:after { - display: table; - line-height: 0; - content: ""; -} - -.controls-row:after { - clear: both; -} - -.controls-row [class*="span"], -.row-fluid .controls-row [class*="span"] { - float: left; -} - -.controls-row .checkbox[class*="span"], -.controls-row .radio[class*="span"] { - padding-top: 5px; -} - -input[disabled], -select[disabled], -textarea[disabled], -input[readonly], -select[readonly], -textarea[readonly] { - cursor: not-allowed; - background-color: #eeeeee; -} - -input[type="radio"][disabled], -input[type="checkbox"][disabled], -input[type="radio"][readonly], -input[type="checkbox"][readonly] { - background-color: transparent; -} - -.control-group.warning .control-label, -.control-group.warning .help-block, -.control-group.warning .help-inline { - color: #c09853; -} - -.control-group.warning .checkbox, -.control-group.warning .radio, -.control-group.warning input, -.control-group.warning select, -.control-group.warning textarea { - color: #c09853; -} - -.control-group.warning input, -.control-group.warning select, -.control-group.warning textarea { - border-color: #c09853; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); -} - -.control-group.warning input:focus, -.control-group.warning select:focus, -.control-group.warning textarea:focus { - border-color: #a47e3c; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #dbc59e; - -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #dbc59e; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #dbc59e; -} - -.control-group.warning .input-prepend .add-on, -.control-group.warning .input-append .add-on { - color: #c09853; - background-color: #fcf8e3; - border-color: #c09853; -} - -.control-group.error .control-label, -.control-group.error .help-block, -.control-group.error .help-inline { - color: #b94a48; -} - 
[... snip: remainder of the removed bundled Bootstrap 2.x stylesheet — form validation states (error/success/info), form actions and help text, input prepend/append and search forms, inline/horizontal form layout, tables, glyphicon icon classes, dropdowns and submenus, wells, fade/collapse transitions, close button, button styles and color variants, button groups, alerts, navs (lists/tabs/pills), and the default and inverse navbar rules ...]
#040404); - background-image: linear-gradient(to bottom, #151515, #040404); - background-repeat: repeat-x; - border-color: #040404 #040404 #000000; - border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff151515', endColorstr='#ff040404', GradientType=0); - filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); -} - -.navbar-inverse .btn-navbar:hover, -.navbar-inverse .btn-navbar:focus, -.navbar-inverse .btn-navbar:active, -.navbar-inverse .btn-navbar.active, -.navbar-inverse .btn-navbar.disabled, -.navbar-inverse .btn-navbar[disabled] { - color: #ffffff; - background-color: #040404; - *background-color: #000000; -} - -.navbar-inverse .btn-navbar:active, -.navbar-inverse .btn-navbar.active { - background-color: #000000 \9; -} - -.breadcrumb { - padding: 8px 15px; - margin: 0 0 20px; - list-style: none; - background-color: #f5f5f5; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; -} - -.breadcrumb > li { - display: inline-block; - *display: inline; - text-shadow: 0 1px 0 #ffffff; - *zoom: 1; -} - -.breadcrumb > li > .divider { - padding: 0 5px; - color: #ccc; -} - -.breadcrumb > .active { - color: #999999; -} - -.pagination { - margin: 20px 0; -} - -.pagination ul { - display: inline-block; - *display: inline; - margin-bottom: 0; - margin-left: 0; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; - *zoom: 1; - -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); - -moz-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); - box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); -} - -.pagination ul > li { - display: inline; -} - -.pagination ul > li > a, -.pagination ul > li > span { - float: left; - padding: 4px 12px; - line-height: 20px; - text-decoration: none; - background-color: #ffffff; - border: 1px solid #dddddd; - border-left-width: 0; -} - -.pagination ul > li > a:hover, -.pagination ul > li > a:focus, -.pagination ul > .active > a, -.pagination ul > .active > span { - background-color: #f5f5f5; -} - -.pagination ul > .active > a, -.pagination ul > .active > span { - color: #999999; - cursor: default; -} - -.pagination ul > .disabled > span, -.pagination ul > .disabled > a, -.pagination ul > .disabled > a:hover, -.pagination ul > .disabled > a:focus { - color: #999999; - cursor: default; - background-color: transparent; -} - -.pagination ul > li:first-child > a, -.pagination ul > li:first-child > span { - border-left-width: 1px; - -webkit-border-bottom-left-radius: 4px; - border-bottom-left-radius: 4px; - -webkit-border-top-left-radius: 4px; - border-top-left-radius: 4px; - -moz-border-radius-bottomleft: 4px; - -moz-border-radius-topleft: 4px; -} - -.pagination ul > li:last-child > a, -.pagination ul > li:last-child > span { - -webkit-border-top-right-radius: 4px; - border-top-right-radius: 4px; - -webkit-border-bottom-right-radius: 4px; - border-bottom-right-radius: 4px; - -moz-border-radius-topright: 4px; - -moz-border-radius-bottomright: 4px; -} - -.pagination-centered { - text-align: center; -} - -.pagination-right { - text-align: right; -} - -.pagination-large ul > li > a, -.pagination-large ul > li > span { - padding: 11px 19px; - font-size: 17.5px; -} - -.pagination-large ul > li:first-child > a, -.pagination-large ul > li:first-child > span { - -webkit-border-bottom-left-radius: 6px; - border-bottom-left-radius: 6px; - -webkit-border-top-left-radius: 6px; - border-top-left-radius: 6px; - -moz-border-radius-bottomleft: 6px; - 
-moz-border-radius-topleft: 6px; -} - -.pagination-large ul > li:last-child > a, -.pagination-large ul > li:last-child > span { - -webkit-border-top-right-radius: 6px; - border-top-right-radius: 6px; - -webkit-border-bottom-right-radius: 6px; - border-bottom-right-radius: 6px; - -moz-border-radius-topright: 6px; - -moz-border-radius-bottomright: 6px; -} - -.pagination-mini ul > li:first-child > a, -.pagination-small ul > li:first-child > a, -.pagination-mini ul > li:first-child > span, -.pagination-small ul > li:first-child > span { - -webkit-border-bottom-left-radius: 3px; - border-bottom-left-radius: 3px; - -webkit-border-top-left-radius: 3px; - border-top-left-radius: 3px; - -moz-border-radius-bottomleft: 3px; - -moz-border-radius-topleft: 3px; -} - -.pagination-mini ul > li:last-child > a, -.pagination-small ul > li:last-child > a, -.pagination-mini ul > li:last-child > span, -.pagination-small ul > li:last-child > span { - -webkit-border-top-right-radius: 3px; - border-top-right-radius: 3px; - -webkit-border-bottom-right-radius: 3px; - border-bottom-right-radius: 3px; - -moz-border-radius-topright: 3px; - -moz-border-radius-bottomright: 3px; -} - -.pagination-small ul > li > a, -.pagination-small ul > li > span { - padding: 2px 10px; - font-size: 11.9px; -} - -.pagination-mini ul > li > a, -.pagination-mini ul > li > span { - padding: 0 6px; - font-size: 10.5px; -} - -.pager { - margin: 20px 0; - text-align: center; - list-style: none; - *zoom: 1; -} - -.pager:before, -.pager:after { - display: table; - line-height: 0; - content: ""; -} - -.pager:after { - clear: both; -} - -.pager li { - display: inline; -} - -.pager li > a, -.pager li > span { - display: inline-block; - padding: 5px 14px; - background-color: #fff; - border: 1px solid #ddd; - -webkit-border-radius: 15px; - -moz-border-radius: 15px; - border-radius: 15px; -} - -.pager li > a:hover, -.pager li > a:focus { - text-decoration: none; - background-color: #f5f5f5; -} - -.pager .next > a, -.pager .next > span { - float: right; -} - -.pager .previous > a, -.pager .previous > span { - float: left; -} - -.pager .disabled > a, -.pager .disabled > a:hover, -.pager .disabled > a:focus, -.pager .disabled > span { - color: #999999; - cursor: default; - background-color: #fff; -} - -.modal-backdrop { - position: fixed; - top: 0; - right: 0; - bottom: 0; - left: 0; - z-index: 1040; - background-color: #000000; -} - -.modal-backdrop.fade { - opacity: 0; -} - -.modal-backdrop, -.modal-backdrop.fade.in { - opacity: 0.8; - filter: alpha(opacity=80); -} - -.modal { - position: fixed; - top: 10%; - left: 50%; - z-index: 1050; - width: 560px; - margin-left: -280px; - background-color: #ffffff; - border: 1px solid #999; - border: 1px solid rgba(0, 0, 0, 0.3); - *border: 1px solid #999; - -webkit-border-radius: 6px; - -moz-border-radius: 6px; - border-radius: 6px; - outline: none; - -webkit-box-shadow: 0 3px 7px rgba(0, 0, 0, 0.3); - -moz-box-shadow: 0 3px 7px rgba(0, 0, 0, 0.3); - box-shadow: 0 3px 7px rgba(0, 0, 0, 0.3); - -webkit-background-clip: padding-box; - -moz-background-clip: padding-box; - background-clip: padding-box; -} - -.modal.fade { - top: -25%; - -webkit-transition: opacity 0.3s linear, top 0.3s ease-out; - -moz-transition: opacity 0.3s linear, top 0.3s ease-out; - -o-transition: opacity 0.3s linear, top 0.3s ease-out; - transition: opacity 0.3s linear, top 0.3s ease-out; -} - -.modal.fade.in { - top: 10%; -} - -.modal-header { - padding: 9px 15px; - border-bottom: 1px solid #eee; -} - -.modal-header .close { - margin-top: 
2px; -} - -.modal-header h3 { - margin: 0; - line-height: 30px; -} - -.modal-body { - position: relative; - max-height: 400px; - padding: 15px; - overflow-y: auto; -} - -.modal-form { - margin-bottom: 0; -} - -.modal-footer { - padding: 14px 15px 15px; - margin-bottom: 0; - text-align: right; - background-color: #f5f5f5; - border-top: 1px solid #ddd; - -webkit-border-radius: 0 0 6px 6px; - -moz-border-radius: 0 0 6px 6px; - border-radius: 0 0 6px 6px; - *zoom: 1; - -webkit-box-shadow: inset 0 1px 0 #ffffff; - -moz-box-shadow: inset 0 1px 0 #ffffff; - box-shadow: inset 0 1px 0 #ffffff; -} - -.modal-footer:before, -.modal-footer:after { - display: table; - line-height: 0; - content: ""; -} - -.modal-footer:after { - clear: both; -} - -.modal-footer .btn + .btn { - margin-bottom: 0; - margin-left: 5px; -} - -.modal-footer .btn-group .btn + .btn { - margin-left: -1px; -} - -.modal-footer .btn-block + .btn-block { - margin-left: 0; -} - -.tooltip { - position: absolute; - z-index: 1030; - display: block; - font-size: 11px; - line-height: 1.4; - opacity: 0; - filter: alpha(opacity=0); - visibility: visible; -} - -.tooltip.in { - opacity: 0.8; - filter: alpha(opacity=80); -} - -.tooltip.top { - padding: 5px 0; - margin-top: -3px; -} - -.tooltip.right { - padding: 0 5px; - margin-left: 3px; -} - -.tooltip.bottom { - padding: 5px 0; - margin-top: 3px; -} - -.tooltip.left { - padding: 0 5px; - margin-left: -3px; -} - -.tooltip-inner { - max-width: 200px; - padding: 8px; - color: #ffffff; - text-align: center; - text-decoration: none; - background-color: #000000; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; -} - -.tooltip-arrow { - position: absolute; - width: 0; - height: 0; - border-color: transparent; - border-style: solid; -} - -.tooltip.top .tooltip-arrow { - bottom: 0; - left: 50%; - margin-left: -5px; - border-top-color: #000000; - border-width: 5px 5px 0; -} - -.tooltip.right .tooltip-arrow { - top: 50%; - left: 0; - margin-top: -5px; - border-right-color: #000000; - border-width: 5px 5px 5px 0; -} - -.tooltip.left .tooltip-arrow { - top: 50%; - right: 0; - margin-top: -5px; - border-left-color: #000000; - border-width: 5px 0 5px 5px; -} - -.tooltip.bottom .tooltip-arrow { - top: 0; - left: 50%; - margin-left: -5px; - border-bottom-color: #000000; - border-width: 0 5px 5px; -} - -.popover { - position: absolute; - top: 0; - left: 0; - z-index: 1010; - display: none; - max-width: 276px; - padding: 1px; - text-align: left; - white-space: normal; - background-color: #ffffff; - border: 1px solid #ccc; - border: 1px solid rgba(0, 0, 0, 0.2); - -webkit-border-radius: 6px; - -moz-border-radius: 6px; - border-radius: 6px; - -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); - -moz-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); - box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); - -webkit-background-clip: padding-box; - -moz-background-clip: padding; - background-clip: padding-box; -} - -.popover.top { - margin-top: -10px; -} - -.popover.right { - margin-left: 10px; -} - -.popover.bottom { - margin-top: 10px; -} - -.popover.left { - margin-left: -10px; -} - -.popover-title { - padding: 8px 14px; - margin: 0; - font-size: 14px; - font-weight: normal; - line-height: 18px; - background-color: #f7f7f7; - border-bottom: 1px solid #ebebeb; - -webkit-border-radius: 5px 5px 0 0; - -moz-border-radius: 5px 5px 0 0; - border-radius: 5px 5px 0 0; -} - -.popover-title:empty { - display: none; -} - -.popover-content { - padding: 9px 14px; -} - -.popover .arrow, -.popover .arrow:after { - 
position: absolute; - display: block; - width: 0; - height: 0; - border-color: transparent; - border-style: solid; -} - -.popover .arrow { - border-width: 11px; -} - -.popover .arrow:after { - border-width: 10px; - content: ""; -} - -.popover.top .arrow { - bottom: -11px; - left: 50%; - margin-left: -11px; - border-top-color: #999; - border-top-color: rgba(0, 0, 0, 0.25); - border-bottom-width: 0; -} - -.popover.top .arrow:after { - bottom: 1px; - margin-left: -10px; - border-top-color: #ffffff; - border-bottom-width: 0; -} - -.popover.right .arrow { - top: 50%; - left: -11px; - margin-top: -11px; - border-right-color: #999; - border-right-color: rgba(0, 0, 0, 0.25); - border-left-width: 0; -} - -.popover.right .arrow:after { - bottom: -10px; - left: 1px; - border-right-color: #ffffff; - border-left-width: 0; -} - -.popover.bottom .arrow { - top: -11px; - left: 50%; - margin-left: -11px; - border-bottom-color: #999; - border-bottom-color: rgba(0, 0, 0, 0.25); - border-top-width: 0; -} - -.popover.bottom .arrow:after { - top: 1px; - margin-left: -10px; - border-bottom-color: #ffffff; - border-top-width: 0; -} - -.popover.left .arrow { - top: 50%; - right: -11px; - margin-top: -11px; - border-left-color: #999; - border-left-color: rgba(0, 0, 0, 0.25); - border-right-width: 0; -} - -.popover.left .arrow:after { - right: 1px; - bottom: -10px; - border-left-color: #ffffff; - border-right-width: 0; -} - -.thumbnails { - margin-left: -20px; - list-style: none; - *zoom: 1; -} - -.thumbnails:before, -.thumbnails:after { - display: table; - line-height: 0; - content: ""; -} - -.thumbnails:after { - clear: both; -} - -.row-fluid .thumbnails { - margin-left: 0; -} - -.thumbnails > li { - float: left; - margin-bottom: 20px; - margin-left: 20px; -} - -.thumbnail { - display: block; - padding: 4px; - line-height: 20px; - border: 1px solid #ddd; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; - -webkit-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.055); - -moz-box-shadow: 0 1px 3px rgba(0, 0, 0, 0.055); - box-shadow: 0 1px 3px rgba(0, 0, 0, 0.055); - -webkit-transition: all 0.2s ease-in-out; - -moz-transition: all 0.2s ease-in-out; - -o-transition: all 0.2s ease-in-out; - transition: all 0.2s ease-in-out; -} - -a.thumbnail:hover, -a.thumbnail:focus { - border-color: #0088cc; - -webkit-box-shadow: 0 1px 4px rgba(0, 105, 214, 0.25); - -moz-box-shadow: 0 1px 4px rgba(0, 105, 214, 0.25); - box-shadow: 0 1px 4px rgba(0, 105, 214, 0.25); -} - -.thumbnail > img { - display: block; - max-width: 100%; - margin-right: auto; - margin-left: auto; -} - -.thumbnail .caption { - padding: 9px; - color: #555555; -} - -.media, -.media-body { - overflow: hidden; - *overflow: visible; - zoom: 1; -} - -.media, -.media .media { - margin-top: 15px; -} - -.media:first-child { - margin-top: 0; -} - -.media-object { - display: block; -} - -.media-heading { - margin: 0 0 5px; -} - -.media > .pull-left { - margin-right: 10px; -} - -.media > .pull-right { - margin-left: 10px; -} - -.media-list { - margin-left: 0; - list-style: none; -} - -.label, -.badge { - display: inline-block; - padding: 2px 4px; - font-size: 11.844px; - font-weight: bold; - line-height: 14px; - color: #ffffff; - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); - white-space: nowrap; - vertical-align: baseline; - background-color: #999999; -} - -.label { - -webkit-border-radius: 3px; - -moz-border-radius: 3px; - border-radius: 3px; -} - -.badge { - padding-right: 9px; - padding-left: 9px; - -webkit-border-radius: 9px; - -moz-border-radius: 9px; - 
border-radius: 9px; -} - -.label:empty, -.badge:empty { - display: none; -} - -a.label:hover, -a.label:focus, -a.badge:hover, -a.badge:focus { - color: #ffffff; - text-decoration: none; - cursor: pointer; -} - -.label-important, -.badge-important { - background-color: #b94a48; -} - -.label-important[href], -.badge-important[href] { - background-color: #953b39; -} - -.label-warning, -.badge-warning { - background-color: #f89406; -} - -.label-warning[href], -.badge-warning[href] { - background-color: #c67605; -} - -.label-success, -.badge-success { - background-color: #468847; -} - -.label-success[href], -.badge-success[href] { - background-color: #356635; -} - -.label-info, -.badge-info { - background-color: #3a87ad; -} - -.label-info[href], -.badge-info[href] { - background-color: #2d6987; -} - -.label-inverse, -.badge-inverse { - background-color: #333333; -} - -.label-inverse[href], -.badge-inverse[href] { - background-color: #1a1a1a; -} - -.btn .label, -.btn .badge { - position: relative; - top: -1px; -} - -.btn-mini .label, -.btn-mini .badge { - top: 0; -} - -@-webkit-keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} - -@-moz-keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} - -@-ms-keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} - -@-o-keyframes progress-bar-stripes { - from { - background-position: 0 0; - } - to { - background-position: 40px 0; - } -} - -@keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} - -.progress { - height: 20px; - margin-bottom: 20px; - overflow: hidden; - background-color: #f7f7f7; - background-image: -moz-linear-gradient(top, #f5f5f5, #f9f9f9); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#f5f5f5), to(#f9f9f9)); - background-image: -webkit-linear-gradient(top, #f5f5f5, #f9f9f9); - background-image: -o-linear-gradient(top, #f5f5f5, #f9f9f9); - background-image: linear-gradient(to bottom, #f5f5f5, #f9f9f9); - background-repeat: repeat-x; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#fff9f9f9', GradientType=0); - -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1); - -moz-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1); - box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1); -} - -.progress .bar { - float: left; - width: 0; - height: 100%; - font-size: 12px; - color: #ffffff; - text-align: center; - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); - background-color: #0e90d2; - background-image: -moz-linear-gradient(top, #149bdf, #0480be); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#149bdf), to(#0480be)); - background-image: -webkit-linear-gradient(top, #149bdf, #0480be); - background-image: -o-linear-gradient(top, #149bdf, #0480be); - background-image: linear-gradient(to bottom, #149bdf, #0480be); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff149bdf', endColorstr='#ff0480be', GradientType=0); - -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15); - -moz-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15); - box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15); - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; - -webkit-transition: width 0.6s ease; - 
-moz-transition: width 0.6s ease; - -o-transition: width 0.6s ease; - transition: width 0.6s ease; -} - -.progress .bar + .bar { - -webkit-box-shadow: inset 1px 0 0 rgba(0, 0, 0, 0.15), inset 0 -1px 0 rgba(0, 0, 0, 0.15); - -moz-box-shadow: inset 1px 0 0 rgba(0, 0, 0, 0.15), inset 0 -1px 0 rgba(0, 0, 0, 0.15); - box-shadow: inset 1px 0 0 rgba(0, 0, 0, 0.15), inset 0 -1px 0 rgba(0, 0, 0, 0.15); -} - -.progress-striped .bar { - background-color: #149bdf; - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - -webkit-background-size: 40px 40px; - -moz-background-size: 40px 40px; - -o-background-size: 40px 40px; - background-size: 40px 40px; -} - -.progress.active .bar { - -webkit-animation: progress-bar-stripes 2s linear infinite; - -moz-animation: progress-bar-stripes 2s linear infinite; - -ms-animation: progress-bar-stripes 2s linear infinite; - -o-animation: progress-bar-stripes 2s linear infinite; - animation: progress-bar-stripes 2s linear infinite; -} - -.progress-danger .bar, -.progress .bar-danger { - background-color: #dd514c; - background-image: -moz-linear-gradient(top, #ee5f5b, #c43c35); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#ee5f5b), to(#c43c35)); - background-image: -webkit-linear-gradient(top, #ee5f5b, #c43c35); - background-image: -o-linear-gradient(top, #ee5f5b, #c43c35); - background-image: linear-gradient(to bottom, #ee5f5b, #c43c35); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffee5f5b', endColorstr='#ffc43c35', GradientType=0); -} - -.progress-danger.progress-striped .bar, -.progress-striped .bar-danger { - background-color: #ee5f5b; - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, 
transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.progress-success .bar, -.progress .bar-success { - background-color: #5eb95e; - background-image: -moz-linear-gradient(top, #62c462, #57a957); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#62c462), to(#57a957)); - background-image: -webkit-linear-gradient(top, #62c462, #57a957); - background-image: -o-linear-gradient(top, #62c462, #57a957); - background-image: linear-gradient(to bottom, #62c462, #57a957); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff62c462', endColorstr='#ff57a957', GradientType=0); -} - -.progress-success.progress-striped .bar, -.progress-striped .bar-success { - background-color: #62c462; - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.progress-info .bar, -.progress .bar-info { - background-color: #4bb1cf; - background-image: -moz-linear-gradient(top, #5bc0de, #339bb9); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#5bc0de), to(#339bb9)); - background-image: -webkit-linear-gradient(top, #5bc0de, #339bb9); - background-image: -o-linear-gradient(top, #5bc0de, #339bb9); - background-image: linear-gradient(to bottom, #5bc0de, #339bb9); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff339bb9', GradientType=0); -} - -.progress-info.progress-striped .bar, -.progress-striped .bar-info { - background-color: #5bc0de; - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - 
background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.progress-warning .bar, -.progress .bar-warning { - background-color: #faa732; - background-image: -moz-linear-gradient(top, #fbb450, #f89406); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#fbb450), to(#f89406)); - background-image: -webkit-linear-gradient(top, #fbb450, #f89406); - background-image: -o-linear-gradient(top, #fbb450, #f89406); - background-image: linear-gradient(to bottom, #fbb450, #f89406); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffbb450', endColorstr='#fff89406', GradientType=0); -} - -.progress-warning.progress-striped .bar, -.progress-striped .bar-warning { - background-color: #fbb450; - background-image: -webkit-gradient(linear, 0 100%, 100% 0, color-stop(0.25, rgba(255, 255, 255, 0.15)), color-stop(0.25, transparent), color-stop(0.5, transparent), color-stop(0.5, rgba(255, 255, 255, 0.15)), color-stop(0.75, rgba(255, 255, 255, 0.15)), color-stop(0.75, transparent), to(transparent)); - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -moz-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} - -.accordion { - margin-bottom: 20px; -} - -.accordion-group { - margin-bottom: 2px; - border: 1px solid #e5e5e5; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; -} - -.accordion-heading { - border-bottom: 0; -} - -.accordion-heading .accordion-toggle { - display: block; - padding: 8px 15px; -} - -.accordion-toggle { - cursor: pointer; -} - -.accordion-inner { - padding: 9px 15px; - border-top: 1px solid #e5e5e5; -} - -.carousel { - position: relative; - margin-bottom: 20px; - line-height: 1; -} - -.carousel-inner { - position: relative; - width: 100%; - overflow: hidden; -} - -.carousel-inner > .item { - position: relative; - display: none; - -webkit-transition: 0.6s ease-in-out left; - -moz-transition: 0.6s ease-in-out left; - -o-transition: 0.6s ease-in-out left; - transition: 0.6s ease-in-out left; -} - -.carousel-inner > .item > img, -.carousel-inner > .item > a > img { - display: block; - line-height: 1; -} - -.carousel-inner > .active, -.carousel-inner > .next, -.carousel-inner > .prev { - display: block; -} - -.carousel-inner > .active { - left: 0; -} - -.carousel-inner > .next, -.carousel-inner > .prev { - position: absolute; - top: 0; - width: 100%; -} - -.carousel-inner > .next { - left: 100%; -} - -.carousel-inner > .prev { - left: -100%; -} - -.carousel-inner > 
.next.left,
-.carousel-inner > .prev.right {
-  left: 0;
-}
-
-.carousel-inner > .active.left {
-  left: -100%;
-}
-
-.carousel-inner > .active.right {
-  left: 100%;
-}
-
-.carousel-control {
-  position: absolute;
-  top: 40%;
-  left: 15px;
-  width: 40px;
-  height: 40px;
-  margin-top: -20px;
-  font-size: 60px;
-  font-weight: 100;
-  line-height: 30px;
-  color: #ffffff;
-  text-align: center;
-  background: #222222;
-  border: 3px solid #ffffff;
-  -webkit-border-radius: 23px;
-  -moz-border-radius: 23px;
-  border-radius: 23px;
-  opacity: 0.5;
-  filter: alpha(opacity=50);
-}
-
-.carousel-control.right {
-  right: 15px;
-  left: auto;
-}
-
-.carousel-control:hover,
-.carousel-control:focus {
-  color: #ffffff;
-  text-decoration: none;
-  opacity: 0.9;
-  filter: alpha(opacity=90);
-}
-
-.carousel-indicators {
-  position: absolute;
-  top: 15px;
-  right: 15px;
-  z-index: 5;
-  margin: 0;
-  list-style: none;
-}
-
-.carousel-indicators li {
-  display: block;
-  float: left;
-  width: 10px;
-  height: 10px;
-  margin-left: 5px;
-  text-indent: -999px;
-  background-color: #ccc;
-  background-color: rgba(255, 255, 255, 0.25);
-  border-radius: 5px;
-}
-
-.carousel-indicators .active {
-  background-color: #fff;
-}
-
-.carousel-caption {
-  position: absolute;
-  right: 0;
-  bottom: 0;
-  left: 0;
-  padding: 15px;
-  background: #333333;
-  background: rgba(0, 0, 0, 0.75);
-}
-
-.carousel-caption h4,
-.carousel-caption p {
-  line-height: 20px;
-  color: #ffffff;
-}
-
-.carousel-caption h4 {
-  margin: 0 0 5px;
-}
-
-.carousel-caption p {
-  margin-bottom: 0;
-}
-
-.hero-unit {
-  padding: 60px;
-  margin-bottom: 30px;
-  font-size: 18px;
-  font-weight: 200;
-  line-height: 30px;
-  color: inherit;
-  background-color: #eeeeee;
-  -webkit-border-radius: 6px;
-  -moz-border-radius: 6px;
-  border-radius: 6px;
-}
-
-.hero-unit h1 {
-  margin-bottom: 0;
-  font-size: 60px;
-  line-height: 1;
-  letter-spacing: -1px;
-  color: inherit;
-}
-
-.hero-unit li {
-  line-height: 30px;
-}
-
-.pull-right {
-  float: right;
-}
-
-.pull-left {
-  float: left;
-}
-
-.hide {
-  display: none;
-}
-
-.show {
-  display: block;
-}
-
-.invisible {
-  visibility: hidden;
-}
-
-.affix {
-  position: fixed;
-}
diff --git a/docs/theme/docker/static/css/bootstrap.min.css b/docs/theme/docker/static/css/bootstrap.min.css
deleted file mode 100755
index fd5ed73407..0000000000
--- a/docs/theme/docker/static/css/bootstrap.min.css
+++ /dev/null
@@ -1,9 +0,0 @@
-/*!
- * Bootstrap v2.3.0
- *
- * Copyright 2012 Twitter, Inc
- * Licensed under the Apache License v2.0
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Designed and built with all the love in the world @twitter by @mdo and @fat.
- */.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;line-height:0;content:""}.clearfix:after{clear:both}.hide-text{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.input-block-level{display:block;width:100%;min-height:30px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}audio:not([controls]){display:none}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}a:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}a:hover,a:active{outline:0}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}img{width:auto\9;height:auto;max-width:100%;vertical-align:middle;border:0;-ms-interpolation-mode:bicubic}#map_canvas img,.google-maps img{max-width:none}button,input,select,textarea{margin:0;font-size:100%;vertical-align:middle}button,input{*overflow:visible;line-height:normal}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}button,html input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button}label,select,button,input[type="button"],input[type="reset"],input[type="submit"],input[type="radio"],input[type="checkbox"]{cursor:pointer}input[type="search"]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type="search"]::-webkit-search-decoration,input[type="search"]::-webkit-search-cancel-button{-webkit-appearance:none}textarea{overflow:auto;vertical-align:top}@media print{*{color:#000!important;text-shadow:none!important;background:transparent!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}}body{margin:0;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:20px;color:#333;background-color:#fff}a{color:#08c;text-decoration:none}a:hover,a:focus{color:#005580;text-decoration:underline}.img-rounded{-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.img-polaroid{padding:4px;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);-webkit-box-shadow:0 1px 3px rgba(0,0,0,0.1);-moz-box-shadow:0 1px 3px rgba(0,0,0,0.1);box-shadow:0 1px 3px rgba(0,0,0,0.1)}.img-circle{-webkit-border-radius:500px;-moz-border-radius:500px;border-radius:500px}.row{margin-left:-20px;*zoom:1}.row:before,.row:after{display:table;line-height:0;content:""}.row:after{clear:both}[class*="span"]{float:left;min-height:1px;margin-left:20px}.container,.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom 
.container{width:940px}.span12{width:940px}.span11{width:860px}.span10{width:780px}.span9{width:700px}.span8{width:620px}.span7{width:540px}.span6{width:460px}.span5{width:380px}.span4{width:300px}.span3{width:220px}.span2{width:140px}.span1{width:60px}.offset12{margin-left:980px}.offset11{margin-left:900px}.offset10{margin-left:820px}.offset9{margin-left:740px}.offset8{margin-left:660px}.offset7{margin-left:580px}.offset6{margin-left:500px}.offset5{margin-left:420px}.offset4{margin-left:340px}.offset3{margin-left:260px}.offset2{margin-left:180px}.offset1{margin-left:100px}.row-fluid{width:100%;*zoom:1}.row-fluid:before,.row-fluid:after{display:table;line-height:0;content:""}.row-fluid:after{clear:both}.row-fluid [class*="span"]{display:block;float:left;width:100%;min-height:30px;margin-left:2.127659574468085%;*margin-left:2.074468085106383%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid [class*="span"]:first-child{margin-left:0}.row-fluid .controls-row [class*="span"]+[class*="span"]{margin-left:2.127659574468085%}.row-fluid .span12{width:100%;*width:99.94680851063829%}.row-fluid .span11{width:91.48936170212765%;*width:91.43617021276594%}.row-fluid .span10{width:82.97872340425532%;*width:82.92553191489361%}.row-fluid .span9{width:74.46808510638297%;*width:74.41489361702126%}.row-fluid .span8{width:65.95744680851064%;*width:65.90425531914893%}.row-fluid .span7{width:57.44680851063829%;*width:57.39361702127659%}.row-fluid .span6{width:48.93617021276595%;*width:48.88297872340425%}.row-fluid .span5{width:40.42553191489362%;*width:40.37234042553192%}.row-fluid .span4{width:31.914893617021278%;*width:31.861702127659576%}.row-fluid .span3{width:23.404255319148934%;*width:23.351063829787233%}.row-fluid .span2{width:14.893617021276595%;*width:14.840425531914894%}.row-fluid .span1{width:6.382978723404255%;*width:6.329787234042553%}.row-fluid .offset12{margin-left:104.25531914893617%;*margin-left:104.14893617021275%}.row-fluid .offset12:first-child{margin-left:102.12765957446808%;*margin-left:102.02127659574467%}.row-fluid .offset11{margin-left:95.74468085106382%;*margin-left:95.6382978723404%}.row-fluid .offset11:first-child{margin-left:93.61702127659574%;*margin-left:93.51063829787232%}.row-fluid .offset10{margin-left:87.23404255319149%;*margin-left:87.12765957446807%}.row-fluid .offset10:first-child{margin-left:85.1063829787234%;*margin-left:84.99999999999999%}.row-fluid .offset9{margin-left:78.72340425531914%;*margin-left:78.61702127659572%}.row-fluid .offset9:first-child{margin-left:76.59574468085106%;*margin-left:76.48936170212764%}.row-fluid .offset8{margin-left:70.2127659574468%;*margin-left:70.10638297872339%}.row-fluid .offset8:first-child{margin-left:68.08510638297872%;*margin-left:67.9787234042553%}.row-fluid .offset7{margin-left:61.70212765957446%;*margin-left:61.59574468085106%}.row-fluid .offset7:first-child{margin-left:59.574468085106375%;*margin-left:59.46808510638297%}.row-fluid .offset6{margin-left:53.191489361702125%;*margin-left:53.085106382978715%}.row-fluid .offset6:first-child{margin-left:51.063829787234035%;*margin-left:50.95744680851063%}.row-fluid .offset5{margin-left:44.68085106382979%;*margin-left:44.57446808510638%}.row-fluid .offset5:first-child{margin-left:42.5531914893617%;*margin-left:42.4468085106383%}.row-fluid .offset4{margin-left:36.170212765957444%;*margin-left:36.06382978723405%}.row-fluid .offset4:first-child{margin-left:34.04255319148936%;*margin-left:33.93617021276596%}.row-fluid 
.offset3{margin-left:27.659574468085104%;*margin-left:27.5531914893617%}.row-fluid .offset3:first-child{margin-left:25.53191489361702%;*margin-left:25.425531914893618%}.row-fluid .offset2{margin-left:19.148936170212764%;*margin-left:19.04255319148936%}.row-fluid .offset2:first-child{margin-left:17.02127659574468%;*margin-left:16.914893617021278%}.row-fluid .offset1{margin-left:10.638297872340425%;*margin-left:10.53191489361702%}.row-fluid .offset1:first-child{margin-left:8.51063829787234%;*margin-left:8.404255319148938%}[class*="span"].hide,.row-fluid [class*="span"].hide{display:none}[class*="span"].pull-right,.row-fluid [class*="span"].pull-right{float:right}.container{margin-right:auto;margin-left:auto;*zoom:1}.container:before,.container:after{display:table;line-height:0;content:""}.container:after{clear:both}.container-fluid{padding-right:20px;padding-left:20px;*zoom:1}.container-fluid:before,.container-fluid:after{display:table;line-height:0;content:""}.container-fluid:after{clear:both}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:21px;font-weight:200;line-height:30px}small{font-size:85%}strong{font-weight:bold}em{font-style:italic}cite{font-style:normal}.muted{color:#999}a.muted:hover,a.muted:focus{color:#808080}.text-warning{color:#c09853}a.text-warning:hover,a.text-warning:focus{color:#a47e3c}.text-error{color:#b94a48}a.text-error:hover,a.text-error:focus{color:#953b39}.text-info{color:#3a87ad}a.text-info:hover,a.text-info:focus{color:#2d6987}.text-success{color:#468847}a.text-success:hover,a.text-success:focus{color:#356635}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}h1,h2,h3,h4,h5,h6{margin:10px 0;font-family:inherit;font-weight:bold;line-height:20px;color:inherit;text-rendering:optimizelegibility}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small{font-weight:normal;line-height:1;color:#999}h1,h2,h3{line-height:40px}h1{font-size:38.5px}h2{font-size:31.5px}h3{font-size:24.5px}h4{font-size:17.5px}h5{font-size:14px}h6{font-size:11.9px}h1 small{font-size:24.5px}h2 small{font-size:17.5px}h3 small{font-size:14px}h4 small{font-size:14px}.page-header{padding-bottom:9px;margin:20px 0 30px;border-bottom:1px solid #eee}ul,ol{padding:0;margin:0 0 10px 25px}ul ul,ul ol,ol ol,ol ul{margin-bottom:0}li{line-height:20px}ul.unstyled,ol.unstyled{margin-left:0;list-style:none}ul.inline,ol.inline{margin-left:0;list-style:none}ul.inline>li,ol.inline>li{display:inline-block;*display:inline;padding-right:5px;padding-left:5px;*zoom:1}dl{margin-bottom:20px}dt,dd{line-height:20px}dt{font-weight:bold}dd{margin-left:10px}.dl-horizontal{*zoom:1}.dl-horizontal:before,.dl-horizontal:after{display:table;line-height:0;content:""}.dl-horizontal:after{clear:both}.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}hr{margin:20px 0;border:0;border-top:1px solid #eee;border-bottom:1px solid #fff}abbr[title],abbr[data-original-title]{cursor:help;border-bottom:1px dotted #999}abbr.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:0 0 0 15px;margin:0 0 20px;border-left:5px solid #eee}blockquote p{margin-bottom:0;font-size:17.5px;font-weight:300;line-height:1.25}blockquote small{display:block;line-height:20px;color:#999}blockquote small:before{content:'\2014 \00A0'}blockquote.pull-right{float:right;padding-right:15px;padding-left:0;border-right:5px solid #eee;border-left:0}blockquote.pull-right p,blockquote.pull-right 
small{text-align:right}blockquote.pull-right small:before{content:''}blockquote.pull-right small:after{content:'\00A0 \2014'}q:before,q:after,blockquote:before,blockquote:after{content:""}address{display:block;margin-bottom:20px;font-style:normal;line-height:20px}code,pre{padding:0 3px 2px;font-family:Monaco,Menlo,Consolas,"Courier New",monospace;font-size:12px;color:#333;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}code{padding:2px 4px;color:#d14;white-space:nowrap;background-color:#f7f7f9;border:1px solid #e1e1e8}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:20px;word-break:break-all;word-wrap:break-word;white-space:pre;white-space:pre-wrap;background-color:#f5f5f5;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.15);-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}pre.prettyprint{margin-bottom:20px}pre code{padding:0;color:inherit;white-space:pre;white-space:pre-wrap;background-color:transparent;border:0}.pre-scrollable{max-height:340px;overflow-y:scroll}form{margin:0 0 20px}fieldset{padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:40px;color:#333;border:0;border-bottom:1px solid #e5e5e5}legend small{font-size:15px;color:#999}label,input,button,select,textarea{font-size:14px;font-weight:normal;line-height:20px}input,button,select,textarea{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif}label{display:block;margin-bottom:5px}select,textarea,input[type="text"],input[type="password"],input[type="datetime"],input[type="datetime-local"],input[type="date"],input[type="month"],input[type="time"],input[type="week"],input[type="number"],input[type="email"],input[type="url"],input[type="search"],input[type="tel"],input[type="color"],.uneditable-input{display:inline-block;height:20px;padding:4px 6px;margin-bottom:10px;font-size:14px;line-height:20px;color:#555;vertical-align:middle;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}input,textarea,.uneditable-input{width:206px}textarea{height:auto}textarea,input[type="text"],input[type="password"],input[type="datetime"],input[type="datetime-local"],input[type="date"],input[type="month"],input[type="time"],input[type="week"],input[type="number"],input[type="email"],input[type="url"],input[type="search"],input[type="tel"],input[type="color"],.uneditable-input{background-color:#fff;border:1px solid #ccc;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-webkit-transition:border linear .2s,box-shadow linear .2s;-moz-transition:border linear .2s,box-shadow linear .2s;-o-transition:border linear .2s,box-shadow linear .2s;transition:border linear .2s,box-shadow linear .2s}textarea:focus,input[type="text"]:focus,input[type="password"]:focus,input[type="datetime"]:focus,input[type="datetime-local"]:focus,input[type="date"]:focus,input[type="month"]:focus,input[type="time"]:focus,input[type="week"]:focus,input[type="number"]:focus,input[type="email"]:focus,input[type="url"]:focus,input[type="search"]:focus,input[type="tel"]:focus,input[type="color"]:focus,.uneditable-input:focus{border-color:rgba(82,168,236,0.8);outline:0;outline:thin dotted \9;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px 
rgba(82,168,236,0.6)}input[type="radio"],input[type="checkbox"]{margin:4px 0 0;margin-top:1px \9;*margin-top:0;line-height:normal}input[type="file"],input[type="image"],input[type="submit"],input[type="reset"],input[type="button"],input[type="radio"],input[type="checkbox"]{width:auto}select,input[type="file"]{height:30px;*margin-top:4px;line-height:30px}select{width:220px;background-color:#fff;border:1px solid #ccc}select[multiple],select[size]{height:auto}select:focus,input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.uneditable-input,.uneditable-textarea{color:#999;cursor:not-allowed;background-color:#fcfcfc;border-color:#ccc;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.025);-moz-box-shadow:inset 0 1px 2px rgba(0,0,0,0.025);box-shadow:inset 0 1px 2px rgba(0,0,0,0.025)}.uneditable-input{overflow:hidden;white-space:nowrap}.uneditable-textarea{width:auto;height:auto}input:-moz-placeholder,textarea:-moz-placeholder{color:#999}input:-ms-input-placeholder,textarea:-ms-input-placeholder{color:#999}input::-webkit-input-placeholder,textarea::-webkit-input-placeholder{color:#999}.radio,.checkbox{min-height:20px;padding-left:20px}.radio input[type="radio"],.checkbox input[type="checkbox"]{float:left;margin-left:-20px}.controls>.radio:first-child,.controls>.checkbox:first-child{padding-top:5px}.radio.inline,.checkbox.inline{display:inline-block;padding-top:5px;margin-bottom:0;vertical-align:middle}.radio.inline+.radio.inline,.checkbox.inline+.checkbox.inline{margin-left:10px}.input-mini{width:60px}.input-small{width:90px}.input-medium{width:150px}.input-large{width:210px}.input-xlarge{width:270px}.input-xxlarge{width:530px}input[class*="span"],select[class*="span"],textarea[class*="span"],.uneditable-input[class*="span"],.row-fluid input[class*="span"],.row-fluid select[class*="span"],.row-fluid textarea[class*="span"],.row-fluid .uneditable-input[class*="span"]{float:none;margin-left:0}.input-append input[class*="span"],.input-append .uneditable-input[class*="span"],.input-prepend input[class*="span"],.input-prepend .uneditable-input[class*="span"],.row-fluid input[class*="span"],.row-fluid select[class*="span"],.row-fluid textarea[class*="span"],.row-fluid .uneditable-input[class*="span"],.row-fluid .input-prepend [class*="span"],.row-fluid .input-append [class*="span"]{display:inline-block}input,textarea,.uneditable-input{margin-left:0}.controls-row [class*="span"]+[class*="span"]{margin-left:20px}input.span12,textarea.span12,.uneditable-input.span12{width:926px}input.span11,textarea.span11,.uneditable-input.span11{width:846px}input.span10,textarea.span10,.uneditable-input.span10{width:766px}input.span9,textarea.span9,.uneditable-input.span9{width:686px}input.span8,textarea.span8,.uneditable-input.span8{width:606px}input.span7,textarea.span7,.uneditable-input.span7{width:526px}input.span6,textarea.span6,.uneditable-input.span6{width:446px}input.span5,textarea.span5,.uneditable-input.span5{width:366px}input.span4,textarea.span4,.uneditable-input.span4{width:286px}input.span3,textarea.span3,.uneditable-input.span3{width:206px}input.span2,textarea.span2,.uneditable-input.span2{width:126px}input.span1,textarea.span1,.uneditable-input.span1{width:46px}.controls-row{*zoom:1}.controls-row:before,.controls-row:after{display:table;line-height:0;content:""}.controls-row:after{clear:both}.controls-row [class*="span"],.row-fluid .controls-row [class*="span"]{float:left}.controls-row 
.checkbox[class*="span"],.controls-row .radio[class*="span"]{padding-top:5px}input[disabled],select[disabled],textarea[disabled],input[readonly],select[readonly],textarea[readonly]{cursor:not-allowed;background-color:#eee}input[type="radio"][disabled],input[type="checkbox"][disabled],input[type="radio"][readonly],input[type="checkbox"][readonly]{background-color:transparent}.control-group.warning .control-label,.control-group.warning .help-block,.control-group.warning .help-inline{color:#c09853}.control-group.warning .checkbox,.control-group.warning .radio,.control-group.warning input,.control-group.warning select,.control-group.warning textarea{color:#c09853}.control-group.warning input,.control-group.warning select,.control-group.warning textarea{border-color:#c09853;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.warning input:focus,.control-group.warning select:focus,.control-group.warning textarea:focus{border-color:#a47e3c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e}.control-group.warning .input-prepend .add-on,.control-group.warning .input-append .add-on{color:#c09853;background-color:#fcf8e3;border-color:#c09853}.control-group.error .control-label,.control-group.error .help-block,.control-group.error .help-inline{color:#b94a48}.control-group.error .checkbox,.control-group.error .radio,.control-group.error input,.control-group.error select,.control-group.error textarea{color:#b94a48}.control-group.error input,.control-group.error select,.control-group.error textarea{border-color:#b94a48;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.error input:focus,.control-group.error select:focus,.control-group.error textarea:focus{border-color:#953b39;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392}.control-group.error .input-prepend .add-on,.control-group.error .input-append .add-on{color:#b94a48;background-color:#f2dede;border-color:#b94a48}.control-group.success .control-label,.control-group.success .help-block,.control-group.success .help-inline{color:#468847}.control-group.success .checkbox,.control-group.success .radio,.control-group.success input,.control-group.success select,.control-group.success textarea{color:#468847}.control-group.success input,.control-group.success select,.control-group.success textarea{border-color:#468847;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.success input:focus,.control-group.success select:focus,.control-group.success textarea:focus{border-color:#356635;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b}.control-group.success .input-prepend .add-on,.control-group.success .input-append .add-on{color:#468847;background-color:#dff0d8;border-color:#468847}.control-group.info .control-label,.control-group.info .help-block,.control-group.info 
.help-inline{color:#3a87ad}.control-group.info .checkbox,.control-group.info .radio,.control-group.info input,.control-group.info select,.control-group.info textarea{color:#3a87ad}.control-group.info input,.control-group.info select,.control-group.info textarea{border-color:#3a87ad;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.info input:focus,.control-group.info select:focus,.control-group.info textarea:focus{border-color:#2d6987;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7ab5d3;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7ab5d3;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7ab5d3}.control-group.info .input-prepend .add-on,.control-group.info .input-append .add-on{color:#3a87ad;background-color:#d9edf7;border-color:#3a87ad}input:focus:invalid,textarea:focus:invalid,select:focus:invalid{color:#b94a48;border-color:#ee5f5b}input:focus:invalid:focus,textarea:focus:invalid:focus,select:focus:invalid:focus{border-color:#e9322d;-webkit-box-shadow:0 0 6px #f8b9b7;-moz-box-shadow:0 0 6px #f8b9b7;box-shadow:0 0 6px #f8b9b7}.form-actions{padding:19px 20px 20px;margin-top:20px;margin-bottom:20px;background-color:#f5f5f5;border-top:1px solid #e5e5e5;*zoom:1}.form-actions:before,.form-actions:after{display:table;line-height:0;content:""}.form-actions:after{clear:both}.help-block,.help-inline{color:#595959}.help-block{display:block;margin-bottom:10px}.help-inline{display:inline-block;*display:inline;padding-left:5px;vertical-align:middle;*zoom:1}.input-append,.input-prepend{display:inline-block;margin-bottom:10px;font-size:0;white-space:nowrap;vertical-align:middle}.input-append input,.input-prepend input,.input-append select,.input-prepend select,.input-append .uneditable-input,.input-prepend .uneditable-input,.input-append .dropdown-menu,.input-prepend .dropdown-menu,.input-append .popover,.input-prepend .popover{font-size:14px}.input-append input,.input-prepend input,.input-append select,.input-prepend select,.input-append .uneditable-input,.input-prepend .uneditable-input{position:relative;margin-bottom:0;*margin-left:0;vertical-align:top;-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.input-append input:focus,.input-prepend input:focus,.input-append select:focus,.input-prepend select:focus,.input-append .uneditable-input:focus,.input-prepend .uneditable-input:focus{z-index:2}.input-append .add-on,.input-prepend .add-on{display:inline-block;width:auto;height:20px;min-width:16px;padding:4px 5px;font-size:14px;font-weight:normal;line-height:20px;text-align:center;text-shadow:0 1px 0 #fff;background-color:#eee;border:1px solid #ccc}.input-append .add-on,.input-prepend .add-on,.input-append .btn,.input-prepend .btn,.input-append .btn-group>.dropdown-toggle,.input-prepend .btn-group>.dropdown-toggle{vertical-align:top;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.input-append .active,.input-prepend .active{background-color:#a9dba9;border-color:#46a546}.input-prepend .add-on,.input-prepend .btn{margin-right:-1px}.input-prepend .add-on:first-child,.input-prepend .btn:first-child{-webkit-border-radius:4px 0 0 4px;-moz-border-radius:4px 0 0 4px;border-radius:4px 0 0 4px}.input-append input,.input-append select,.input-append .uneditable-input{-webkit-border-radius:4px 0 0 4px;-moz-border-radius:4px 0 0 4px;border-radius:4px 0 0 4px}.input-append input+.btn-group .btn:last-child,.input-append 
select+.btn-group .btn:last-child,.input-append .uneditable-input+.btn-group .btn:last-child{-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.input-append .add-on,.input-append .btn,.input-append .btn-group{margin-left:-1px}.input-append .add-on:last-child,.input-append .btn:last-child,.input-append .btn-group:last-child>.dropdown-toggle{-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.input-prepend.input-append input,.input-prepend.input-append select,.input-prepend.input-append .uneditable-input{-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.input-prepend.input-append input+.btn-group .btn,.input-prepend.input-append select+.btn-group .btn,.input-prepend.input-append .uneditable-input+.btn-group .btn{-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.input-prepend.input-append .add-on:first-child,.input-prepend.input-append .btn:first-child{margin-right:-1px;-webkit-border-radius:4px 0 0 4px;-moz-border-radius:4px 0 0 4px;border-radius:4px 0 0 4px}.input-prepend.input-append .add-on:last-child,.input-prepend.input-append .btn:last-child{margin-left:-1px;-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.input-prepend.input-append .btn-group:first-child{margin-left:0}input.search-query{padding-right:14px;padding-right:4px \9;padding-left:14px;padding-left:4px \9;margin-bottom:0;-webkit-border-radius:15px;-moz-border-radius:15px;border-radius:15px}.form-search .input-append .search-query,.form-search .input-prepend .search-query{-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.form-search .input-append .search-query{-webkit-border-radius:14px 0 0 14px;-moz-border-radius:14px 0 0 14px;border-radius:14px 0 0 14px}.form-search .input-append .btn{-webkit-border-radius:0 14px 14px 0;-moz-border-radius:0 14px 14px 0;border-radius:0 14px 14px 0}.form-search .input-prepend .search-query{-webkit-border-radius:0 14px 14px 0;-moz-border-radius:0 14px 14px 0;border-radius:0 14px 14px 0}.form-search .input-prepend .btn{-webkit-border-radius:14px 0 0 14px;-moz-border-radius:14px 0 0 14px;border-radius:14px 0 0 14px}.form-search input,.form-inline input,.form-horizontal input,.form-search textarea,.form-inline textarea,.form-horizontal textarea,.form-search select,.form-inline select,.form-horizontal select,.form-search .help-inline,.form-inline .help-inline,.form-horizontal .help-inline,.form-search .uneditable-input,.form-inline .uneditable-input,.form-horizontal .uneditable-input,.form-search .input-prepend,.form-inline .input-prepend,.form-horizontal .input-prepend,.form-search .input-append,.form-inline .input-append,.form-horizontal .input-append{display:inline-block;*display:inline;margin-bottom:0;vertical-align:middle;*zoom:1}.form-search .hide,.form-inline .hide,.form-horizontal .hide{display:none}.form-search label,.form-inline label,.form-search .btn-group,.form-inline .btn-group{display:inline-block}.form-search .input-append,.form-inline .input-append,.form-search .input-prepend,.form-inline .input-prepend{margin-bottom:0}.form-search .radio,.form-search .checkbox,.form-inline .radio,.form-inline .checkbox{padding-left:0;margin-bottom:0;vertical-align:middle}.form-search .radio input[type="radio"],.form-search .checkbox input[type="checkbox"],.form-inline .radio input[type="radio"],.form-inline .checkbox 
input[type="checkbox"]{float:left;margin-right:3px;margin-left:0}.control-group{margin-bottom:10px}legend+.control-group{margin-top:20px;-webkit-margin-top-collapse:separate}.form-horizontal .control-group{margin-bottom:20px;*zoom:1}.form-horizontal .control-group:before,.form-horizontal .control-group:after{display:table;line-height:0;content:""}.form-horizontal .control-group:after{clear:both}.form-horizontal .control-label{float:left;width:160px;padding-top:5px;text-align:right}.form-horizontal .controls{*display:inline-block;*padding-left:20px;margin-left:180px;*margin-left:0}.form-horizontal .controls:first-child{*padding-left:180px}.form-horizontal .help-block{margin-bottom:0}.form-horizontal input+.help-block,.form-horizontal select+.help-block,.form-horizontal textarea+.help-block,.form-horizontal .uneditable-input+.help-block,.form-horizontal .input-prepend+.help-block,.form-horizontal .input-append+.help-block{margin-top:10px}.form-horizontal .form-actions{padding-left:180px}table{max-width:100%;background-color:transparent;border-collapse:collapse;border-spacing:0}.table{width:100%;margin-bottom:20px}.table th,.table td{padding:8px;line-height:20px;text-align:left;vertical-align:top;border-top:1px solid #ddd}.table th{font-weight:bold}.table thead th{vertical-align:bottom}.table caption+thead tr:first-child th,.table caption+thead tr:first-child td,.table colgroup+thead tr:first-child th,.table colgroup+thead tr:first-child td,.table thead:first-child tr:first-child th,.table thead:first-child tr:first-child td{border-top:0}.table tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed th,.table-condensed td{padding:4px 5px}.table-bordered{border:1px solid #ddd;border-collapse:separate;*border-collapse:collapse;border-left:0;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.table-bordered th,.table-bordered td{border-left:1px solid #ddd}.table-bordered caption+thead tr:first-child th,.table-bordered caption+tbody tr:first-child th,.table-bordered caption+tbody tr:first-child td,.table-bordered colgroup+thead tr:first-child th,.table-bordered colgroup+tbody tr:first-child th,.table-bordered colgroup+tbody tr:first-child td,.table-bordered thead:first-child tr:first-child th,.table-bordered tbody:first-child tr:first-child th,.table-bordered tbody:first-child tr:first-child td{border-top:0}.table-bordered thead:first-child tr:first-child>th:first-child,.table-bordered tbody:first-child tr:first-child>td:first-child,.table-bordered tbody:first-child tr:first-child>th:first-child{-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-topleft:4px}.table-bordered thead:first-child tr:first-child>th:last-child,.table-bordered tbody:first-child tr:first-child>td:last-child,.table-bordered tbody:first-child tr:first-child>th:last-child{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-moz-border-radius-topright:4px}.table-bordered thead:last-child tr:last-child>th:first-child,.table-bordered tbody:last-child tr:last-child>td:first-child,.table-bordered tbody:last-child tr:last-child>th:first-child,.table-bordered tfoot:last-child tr:last-child>td:first-child,.table-bordered tfoot:last-child tr:last-child>th:first-child{-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-moz-border-radius-bottomleft:4px}.table-bordered thead:last-child tr:last-child>th:last-child,.table-bordered tbody:last-child tr:last-child>td:last-child,.table-bordered tbody:last-child 
tr:last-child>th:last-child,.table-bordered tfoot:last-child tr:last-child>td:last-child,.table-bordered tfoot:last-child tr:last-child>th:last-child{-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-moz-border-radius-bottomright:4px}.table-bordered tfoot+tbody:last-child tr:last-child td:first-child{-webkit-border-bottom-left-radius:0;border-bottom-left-radius:0;-moz-border-radius-bottomleft:0}.table-bordered tfoot+tbody:last-child tr:last-child td:last-child{-webkit-border-bottom-right-radius:0;border-bottom-right-radius:0;-moz-border-radius-bottomright:0}.table-bordered caption+thead tr:first-child th:first-child,.table-bordered caption+tbody tr:first-child td:first-child,.table-bordered colgroup+thead tr:first-child th:first-child,.table-bordered colgroup+tbody tr:first-child td:first-child{-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-topleft:4px}.table-bordered caption+thead tr:first-child th:last-child,.table-bordered caption+tbody tr:first-child td:last-child,.table-bordered colgroup+thead tr:first-child th:last-child,.table-bordered colgroup+tbody tr:first-child td:last-child{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-moz-border-radius-topright:4px}.table-striped tbody>tr:nth-child(odd)>td,.table-striped tbody>tr:nth-child(odd)>th{background-color:#f9f9f9}.table-hover tbody tr:hover>td,.table-hover tbody tr:hover>th{background-color:#f5f5f5}table td[class*="span"],table th[class*="span"],.row-fluid table td[class*="span"],.row-fluid table th[class*="span"]{display:table-cell;float:none;margin-left:0}.table td.span1,.table th.span1{float:none;width:44px;margin-left:0}.table td.span2,.table th.span2{float:none;width:124px;margin-left:0}.table td.span3,.table th.span3{float:none;width:204px;margin-left:0}.table td.span4,.table th.span4{float:none;width:284px;margin-left:0}.table td.span5,.table th.span5{float:none;width:364px;margin-left:0}.table td.span6,.table th.span6{float:none;width:444px;margin-left:0}.table td.span7,.table th.span7{float:none;width:524px;margin-left:0}.table td.span8,.table th.span8{float:none;width:604px;margin-left:0}.table td.span9,.table th.span9{float:none;width:684px;margin-left:0}.table td.span10,.table th.span10{float:none;width:764px;margin-left:0}.table td.span11,.table th.span11{float:none;width:844px;margin-left:0}.table td.span12,.table th.span12{float:none;width:924px;margin-left:0}.table tbody tr.success>td{background-color:#dff0d8}.table tbody tr.error>td{background-color:#f2dede}.table tbody tr.warning>td{background-color:#fcf8e3}.table tbody tr.info>td{background-color:#d9edf7}.table-hover tbody tr.success:hover>td{background-color:#d0e9c6}.table-hover tbody tr.error:hover>td{background-color:#ebcccc}.table-hover tbody tr.warning:hover>td{background-color:#faf2cc}.table-hover tbody tr.info:hover>td{background-color:#c4e3f3}[class^="icon-"],[class*=" icon-"]{display:inline-block;width:14px;height:14px;margin-top:1px;*margin-right:.3em;line-height:14px;vertical-align:text-top;background-image:url("../img/glyphicons-halflings.png");background-position:14px 14px;background-repeat:no-repeat}.icon-white,.nav-pills>.active>a>[class^="icon-"],.nav-pills>.active>a>[class*=" icon-"],.nav-list>.active>a>[class^="icon-"],.nav-list>.active>a>[class*=" icon-"],.navbar-inverse .nav>.active>a>[class^="icon-"],.navbar-inverse .nav>.active>a>[class*=" icon-"],.dropdown-menu>li>a:hover>[class^="icon-"],.dropdown-menu>li>a:focus>[class^="icon-"],.dropdown-menu>li>a:hover>[class*=" 
icon-"],.dropdown-menu>li>a:focus>[class*=" icon-"],.dropdown-menu>.active>a>[class^="icon-"],.dropdown-menu>.active>a>[class*=" icon-"],.dropdown-submenu:hover>a>[class^="icon-"],.dropdown-submenu:focus>a>[class^="icon-"],.dropdown-submenu:hover>a>[class*=" icon-"],.dropdown-submenu:focus>a>[class*=" icon-"]{background-image:url("../img/glyphicons-halflings-white.png")}.icon-glass{background-position:0 0}.icon-music{background-position:-24px 0}.icon-search{background-position:-48px 0}.icon-envelope{background-position:-72px 0}.icon-heart{background-position:-96px 0}.icon-star{background-position:-120px 0}.icon-star-empty{background-position:-144px 0}.icon-user{background-position:-168px 0}.icon-film{background-position:-192px 0}.icon-th-large{background-position:-216px 0}.icon-th{background-position:-240px 0}.icon-th-list{background-position:-264px 0}.icon-ok{background-position:-288px 0}.icon-remove{background-position:-312px 0}.icon-zoom-in{background-position:-336px 0}.icon-zoom-out{background-position:-360px 0}.icon-off{background-position:-384px 0}.icon-signal{background-position:-408px 0}.icon-cog{background-position:-432px 0}.icon-trash{background-position:-456px 0}.icon-home{background-position:0 -24px}.icon-file{background-position:-24px -24px}.icon-time{background-position:-48px -24px}.icon-road{background-position:-72px -24px}.icon-download-alt{background-position:-96px -24px}.icon-download{background-position:-120px -24px}.icon-upload{background-position:-144px -24px}.icon-inbox{background-position:-168px -24px}.icon-play-circle{background-position:-192px -24px}.icon-repeat{background-position:-216px -24px}.icon-refresh{background-position:-240px -24px}.icon-list-alt{background-position:-264px -24px}.icon-lock{background-position:-287px -24px}.icon-flag{background-position:-312px -24px}.icon-headphones{background-position:-336px -24px}.icon-volume-off{background-position:-360px -24px}.icon-volume-down{background-position:-384px -24px}.icon-volume-up{background-position:-408px -24px}.icon-qrcode{background-position:-432px -24px}.icon-barcode{background-position:-456px -24px}.icon-tag{background-position:0 -48px}.icon-tags{background-position:-25px -48px}.icon-book{background-position:-48px -48px}.icon-bookmark{background-position:-72px -48px}.icon-print{background-position:-96px -48px}.icon-camera{background-position:-120px -48px}.icon-font{background-position:-144px -48px}.icon-bold{background-position:-167px -48px}.icon-italic{background-position:-192px -48px}.icon-text-height{background-position:-216px -48px}.icon-text-width{background-position:-240px -48px}.icon-align-left{background-position:-264px -48px}.icon-align-center{background-position:-288px -48px}.icon-align-right{background-position:-312px -48px}.icon-align-justify{background-position:-336px -48px}.icon-list{background-position:-360px -48px}.icon-indent-left{background-position:-384px -48px}.icon-indent-right{background-position:-408px -48px}.icon-facetime-video{background-position:-432px -48px}.icon-picture{background-position:-456px -48px}.icon-pencil{background-position:0 -72px}.icon-map-marker{background-position:-24px -72px}.icon-adjust{background-position:-48px -72px}.icon-tint{background-position:-72px -72px}.icon-edit{background-position:-96px -72px}.icon-share{background-position:-120px -72px}.icon-check{background-position:-144px -72px}.icon-move{background-position:-168px -72px}.icon-step-backward{background-position:-192px -72px}.icon-fast-backward{background-position:-216px 
-72px}.icon-backward{background-position:-240px -72px}.icon-play{background-position:-264px -72px}.icon-pause{background-position:-288px -72px}.icon-stop{background-position:-312px -72px}.icon-forward{background-position:-336px -72px}.icon-fast-forward{background-position:-360px -72px}.icon-step-forward{background-position:-384px -72px}.icon-eject{background-position:-408px -72px}.icon-chevron-left{background-position:-432px -72px}.icon-chevron-right{background-position:-456px -72px}.icon-plus-sign{background-position:0 -96px}.icon-minus-sign{background-position:-24px -96px}.icon-remove-sign{background-position:-48px -96px}.icon-ok-sign{background-position:-72px -96px}.icon-question-sign{background-position:-96px -96px}.icon-info-sign{background-position:-120px -96px}.icon-screenshot{background-position:-144px -96px}.icon-remove-circle{background-position:-168px -96px}.icon-ok-circle{background-position:-192px -96px}.icon-ban-circle{background-position:-216px -96px}.icon-arrow-left{background-position:-240px -96px}.icon-arrow-right{background-position:-264px -96px}.icon-arrow-up{background-position:-289px -96px}.icon-arrow-down{background-position:-312px -96px}.icon-share-alt{background-position:-336px -96px}.icon-resize-full{background-position:-360px -96px}.icon-resize-small{background-position:-384px -96px}.icon-plus{background-position:-408px -96px}.icon-minus{background-position:-433px -96px}.icon-asterisk{background-position:-456px -96px}.icon-exclamation-sign{background-position:0 -120px}.icon-gift{background-position:-24px -120px}.icon-leaf{background-position:-48px -120px}.icon-fire{background-position:-72px -120px}.icon-eye-open{background-position:-96px -120px}.icon-eye-close{background-position:-120px -120px}.icon-warning-sign{background-position:-144px -120px}.icon-plane{background-position:-168px -120px}.icon-calendar{background-position:-192px -120px}.icon-random{width:16px;background-position:-216px -120px}.icon-comment{background-position:-240px -120px}.icon-magnet{background-position:-264px -120px}.icon-chevron-up{background-position:-288px -120px}.icon-chevron-down{background-position:-313px -119px}.icon-retweet{background-position:-336px -120px}.icon-shopping-cart{background-position:-360px -120px}.icon-folder-close{width:16px;background-position:-384px -120px}.icon-folder-open{width:16px;background-position:-408px -120px}.icon-resize-vertical{background-position:-432px -119px}.icon-resize-horizontal{background-position:-456px -118px}.icon-hdd{background-position:0 -144px}.icon-bullhorn{background-position:-24px -144px}.icon-bell{background-position:-48px -144px}.icon-certificate{background-position:-72px -144px}.icon-thumbs-up{background-position:-96px -144px}.icon-thumbs-down{background-position:-120px -144px}.icon-hand-right{background-position:-144px -144px}.icon-hand-left{background-position:-168px -144px}.icon-hand-up{background-position:-192px -144px}.icon-hand-down{background-position:-216px -144px}.icon-circle-arrow-right{background-position:-240px -144px}.icon-circle-arrow-left{background-position:-264px -144px}.icon-circle-arrow-up{background-position:-288px -144px}.icon-circle-arrow-down{background-position:-312px -144px}.icon-globe{background-position:-336px -144px}.icon-wrench{background-position:-360px -144px}.icon-tasks{background-position:-384px -144px}.icon-filter{background-position:-408px -144px}.icon-briefcase{background-position:-432px -144px}.icon-fullscreen{background-position:-456px 
-144px}.dropup,.dropdown{position:relative}.dropdown-toggle{*margin-bottom:-3px}.dropdown-toggle:active,.open .dropdown-toggle{outline:0}.caret{display:inline-block;width:0;height:0;vertical-align:top;border-top:4px solid #000;border-right:4px solid transparent;border-left:4px solid transparent;content:""}.dropdown .caret{margin-top:8px;margin-left:2px}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;list-style:none;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);*border-right-width:2px;*border-bottom-width:2px;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,0.2);-moz-box-shadow:0 5px 10px rgba(0,0,0,0.2);box-shadow:0 5px 10px rgba(0,0,0,0.2);-webkit-background-clip:padding-box;-moz-background-clip:padding;background-clip:padding-box}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{*width:100%;height:1px;margin:9px 1px;*margin:-5px 0 5px;overflow:hidden;background-color:#e5e5e5;border-bottom:1px solid #fff}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:normal;line-height:20px;color:#333;white-space:nowrap}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus,.dropdown-submenu:hover>a,.dropdown-submenu:focus>a{color:#fff;text-decoration:none;background-color:#0081c2;background-image:-moz-linear-gradient(top,#08c,#0077b3);background-image:-webkit-gradient(linear,0 0,0 100%,from(#08c),to(#0077b3));background-image:-webkit-linear-gradient(top,#08c,#0077b3);background-image:-o-linear-gradient(top,#08c,#0077b3);background-image:linear-gradient(to bottom,#08c,#0077b3);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff0088cc',endColorstr='#ff0077b3',GradientType=0)}.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{color:#fff;text-decoration:none;background-color:#0081c2;background-image:-moz-linear-gradient(top,#08c,#0077b3);background-image:-webkit-gradient(linear,0 0,0 100%,from(#08c),to(#0077b3));background-image:-webkit-linear-gradient(top,#08c,#0077b3);background-image:-o-linear-gradient(top,#08c,#0077b3);background-image:linear-gradient(to bottom,#08c,#0077b3);background-repeat:repeat-x;outline:0;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff0088cc',endColorstr='#ff0077b3',GradientType=0)}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{color:#999}.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{text-decoration:none;cursor:default;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open{*z-index:1000}.open>.dropdown-menu{display:block}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{border-top:0;border-bottom:4px solid #000;content:""}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:1px}.dropdown-submenu{position:relative}.dropdown-submenu>.dropdown-menu{top:0;left:100%;margin-top:-6px;margin-left:-1px;-webkit-border-radius:0 6px 6px 6px;-moz-border-radius:0 6px 6px 6px;border-radius:0 6px 6px 6px}.dropdown-submenu:hover>.dropdown-menu{display:block}.dropup .dropdown-submenu>.dropdown-menu{top:auto;bottom:0;margin-top:0;margin-bottom:-2px;-webkit-border-radius:5px 5px 5px 0;-moz-border-radius:5px 5px 5px 0;border-radius:5px 5px 5px 
0}.dropdown-submenu>a:after{display:block;float:right;width:0;height:0;margin-top:5px;margin-right:-10px;border-color:transparent;border-left-color:#ccc;border-style:solid;border-width:5px 0 5px 5px;content:" "}.dropdown-submenu:hover>a:after{border-left-color:#fff}.dropdown-submenu.pull-left{float:none}.dropdown-submenu.pull-left>.dropdown-menu{left:-100%;margin-left:10px;-webkit-border-radius:6px 0 6px 6px;-moz-border-radius:6px 0 6px 6px;border-radius:6px 0 6px 6px}.dropdown .dropdown-menu .nav-header{padding-right:20px;padding-left:20px}.typeahead{z-index:1051;margin-top:2px;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.05);box-shadow:inset 0 1px 1px rgba(0,0,0,0.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,0.15)}.well-large{padding:24px;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.well-small{padding:9px;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.fade{opacity:0;-webkit-transition:opacity .15s linear;-moz-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s linear}.fade.in{opacity:1}.collapse{position:relative;height:0;overflow:hidden;-webkit-transition:height .35s ease;-moz-transition:height .35s ease;-o-transition:height .35s ease;transition:height .35s ease}.collapse.in{height:auto}.close{float:right;font-size:20px;font-weight:bold;line-height:20px;color:#000;text-shadow:0 1px 0 #fff;opacity:.2;filter:alpha(opacity=20)}.close:hover,.close:focus{color:#000;text-decoration:none;cursor:pointer;opacity:.4;filter:alpha(opacity=40)}button.close{padding:0;cursor:pointer;background:transparent;border:0;-webkit-appearance:none}.btn{display:inline-block;*display:inline;padding:4px 12px;margin-bottom:0;*margin-left:.3em;font-size:14px;line-height:20px;color:#333;text-align:center;text-shadow:0 1px 1px rgba(255,255,255,0.75);vertical-align:middle;cursor:pointer;background-color:#f5f5f5;*background-color:#e6e6e6;background-image:-moz-linear-gradient(top,#fff,#e6e6e6);background-image:-webkit-gradient(linear,0 0,0 100%,from(#fff),to(#e6e6e6));background-image:-webkit-linear-gradient(top,#fff,#e6e6e6);background-image:-o-linear-gradient(top,#fff,#e6e6e6);background-image:linear-gradient(to bottom,#fff,#e6e6e6);background-repeat:repeat-x;border:1px solid #ccc;*border:0;border-color:#e6e6e6 #e6e6e6 #bfbfbf;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);border-bottom-color:#b3b3b3;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff',endColorstr='#ffe6e6e6',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);*zoom:1;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05)}.btn:hover,.btn:focus,.btn:active,.btn.active,.btn.disabled,.btn[disabled]{color:#333;background-color:#e6e6e6;*background-color:#d9d9d9}.btn:active,.btn.active{background-color:#ccc \9}.btn:first-child{*margin-left:0}.btn:hover,.btn:focus{color:#333;text-decoration:none;background-position:0 -15px;-webkit-transition:background-position .1s 
linear;-moz-transition:background-position .1s linear;-o-transition:background-position .1s linear;transition:background-position .1s linear}.btn:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn.active,.btn:active{background-image:none;outline:0;-webkit-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05)}.btn.disabled,.btn[disabled]{cursor:default;background-image:none;opacity:.65;filter:alpha(opacity=65);-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.btn-large{padding:11px 19px;font-size:17.5px;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.btn-large [class^="icon-"],.btn-large [class*=" icon-"]{margin-top:4px}.btn-small{padding:2px 10px;font-size:11.9px;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.btn-small [class^="icon-"],.btn-small [class*=" icon-"]{margin-top:0}.btn-mini [class^="icon-"],.btn-mini [class*=" icon-"]{margin-top:-1px}.btn-mini{padding:0 6px;font-size:10.5px;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.btn-block{display:block;width:100%;padding-right:0;padding-left:0;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.btn-block+.btn-block{margin-top:5px}input[type="submit"].btn-block,input[type="reset"].btn-block,input[type="button"].btn-block{width:100%}.btn-primary.active,.btn-warning.active,.btn-danger.active,.btn-success.active,.btn-info.active,.btn-inverse.active{color:rgba(255,255,255,0.75)}.btn-primary{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#006dcc;*background-color:#04c;background-image:-moz-linear-gradient(top,#08c,#04c);background-image:-webkit-gradient(linear,0 0,0 100%,from(#08c),to(#04c));background-image:-webkit-linear-gradient(top,#08c,#04c);background-image:-o-linear-gradient(top,#08c,#04c);background-image:linear-gradient(to bottom,#08c,#04c);background-repeat:repeat-x;border-color:#04c #04c #002a80;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff0088cc',endColorstr='#ff0044cc',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.btn-primary:hover,.btn-primary:focus,.btn-primary:active,.btn-primary.active,.btn-primary.disabled,.btn-primary[disabled]{color:#fff;background-color:#04c;*background-color:#003bb3}.btn-primary:active,.btn-primary.active{background-color:#039 \9}.btn-warning{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#faa732;*background-color:#f89406;background-image:-moz-linear-gradient(top,#fbb450,#f89406);background-image:-webkit-gradient(linear,0 0,0 100%,from(#fbb450),to(#f89406));background-image:-webkit-linear-gradient(top,#fbb450,#f89406);background-image:-o-linear-gradient(top,#fbb450,#f89406);background-image:linear-gradient(to bottom,#fbb450,#f89406);background-repeat:repeat-x;border-color:#f89406 #f89406 #ad6704;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) 
rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffbb450',endColorstr='#fff89406',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.btn-warning:hover,.btn-warning:focus,.btn-warning:active,.btn-warning.active,.btn-warning.disabled,.btn-warning[disabled]{color:#fff;background-color:#f89406;*background-color:#df8505}.btn-warning:active,.btn-warning.active{background-color:#c67605 \9}.btn-danger{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#da4f49;*background-color:#bd362f;background-image:-moz-linear-gradient(top,#ee5f5b,#bd362f);background-image:-webkit-gradient(linear,0 0,0 100%,from(#ee5f5b),to(#bd362f));background-image:-webkit-linear-gradient(top,#ee5f5b,#bd362f);background-image:-o-linear-gradient(top,#ee5f5b,#bd362f);background-image:linear-gradient(to bottom,#ee5f5b,#bd362f);background-repeat:repeat-x;border-color:#bd362f #bd362f #802420;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffee5f5b',endColorstr='#ffbd362f',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.btn-danger:hover,.btn-danger:focus,.btn-danger:active,.btn-danger.active,.btn-danger.disabled,.btn-danger[disabled]{color:#fff;background-color:#bd362f;*background-color:#a9302a}.btn-danger:active,.btn-danger.active{background-color:#942a25 \9}.btn-success{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#5bb75b;*background-color:#51a351;background-image:-moz-linear-gradient(top,#62c462,#51a351);background-image:-webkit-gradient(linear,0 0,0 100%,from(#62c462),to(#51a351));background-image:-webkit-linear-gradient(top,#62c462,#51a351);background-image:-o-linear-gradient(top,#62c462,#51a351);background-image:linear-gradient(to bottom,#62c462,#51a351);background-repeat:repeat-x;border-color:#51a351 #51a351 #387038;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff62c462',endColorstr='#ff51a351',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.btn-success:hover,.btn-success:focus,.btn-success:active,.btn-success.active,.btn-success.disabled,.btn-success[disabled]{color:#fff;background-color:#51a351;*background-color:#499249}.btn-success:active,.btn-success.active{background-color:#408140 \9}.btn-info{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#49afcd;*background-color:#2f96b4;background-image:-moz-linear-gradient(top,#5bc0de,#2f96b4);background-image:-webkit-gradient(linear,0 0,0 100%,from(#5bc0de),to(#2f96b4));background-image:-webkit-linear-gradient(top,#5bc0de,#2f96b4);background-image:-o-linear-gradient(top,#5bc0de,#2f96b4);background-image:linear-gradient(to bottom,#5bc0de,#2f96b4);background-repeat:repeat-x;border-color:#2f96b4 #2f96b4 #1f6377;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff2f96b4',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.btn-info:hover,.btn-info:focus,.btn-info:active,.btn-info.active,.btn-info.disabled,.btn-info[disabled]{color:#fff;background-color:#2f96b4;*background-color:#2a85a0}.btn-info:active,.btn-info.active{background-color:#24748c \9}.btn-inverse{color:#fff;text-shadow:0 -1px 0 
rgba(0,0,0,0.25);background-color:#363636;*background-color:#222;background-image:-moz-linear-gradient(top,#444,#222);background-image:-webkit-gradient(linear,0 0,0 100%,from(#444),to(#222));background-image:-webkit-linear-gradient(top,#444,#222);background-image:-o-linear-gradient(top,#444,#222);background-image:linear-gradient(to bottom,#444,#222);background-repeat:repeat-x;border-color:#222 #222 #000;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff444444',endColorstr='#ff222222',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.btn-inverse:hover,.btn-inverse:focus,.btn-inverse:active,.btn-inverse.active,.btn-inverse.disabled,.btn-inverse[disabled]{color:#fff;background-color:#222;*background-color:#151515}.btn-inverse:active,.btn-inverse.active{background-color:#080808 \9}button.btn,input[type="submit"].btn{*padding-top:3px;*padding-bottom:3px}button.btn::-moz-focus-inner,input[type="submit"].btn::-moz-focus-inner{padding:0;border:0}button.btn.btn-large,input[type="submit"].btn.btn-large{*padding-top:7px;*padding-bottom:7px}button.btn.btn-small,input[type="submit"].btn.btn-small{*padding-top:3px;*padding-bottom:3px}button.btn.btn-mini,input[type="submit"].btn.btn-mini{*padding-top:1px;*padding-bottom:1px}.btn-link,.btn-link:active,.btn-link[disabled]{background-color:transparent;background-image:none;-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.btn-link{color:#08c;cursor:pointer;border-color:transparent;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.btn-link:hover,.btn-link:focus{color:#005580;text-decoration:underline;background-color:transparent}.btn-link[disabled]:hover,.btn-link[disabled]:focus{color:#333;text-decoration:none}.btn-group{position:relative;display:inline-block;*display:inline;*margin-left:.3em;font-size:0;white-space:nowrap;vertical-align:middle;*zoom:1}.btn-group:first-child{*margin-left:0}.btn-group+.btn-group{margin-left:5px}.btn-toolbar{margin-top:10px;margin-bottom:10px;font-size:0}.btn-toolbar>.btn+.btn,.btn-toolbar>.btn-group+.btn,.btn-toolbar>.btn+.btn-group{margin-left:5px}.btn-group>.btn{position:relative;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.btn-group>.btn+.btn{margin-left:-1px}.btn-group>.btn,.btn-group>.dropdown-menu,.btn-group>.popover{font-size:14px}.btn-group>.btn-mini{font-size:10.5px}.btn-group>.btn-small{font-size:11.9px}.btn-group>.btn-large{font-size:17.5px}.btn-group>.btn:first-child{margin-left:0;-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-bottomleft:4px;-moz-border-radius-topleft:4px}.btn-group>.btn:last-child,.btn-group>.dropdown-toggle{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-moz-border-radius-topright:4px;-moz-border-radius-bottomright:4px}.btn-group>.btn.large:first-child{margin-left:0;-webkit-border-bottom-left-radius:6px;border-bottom-left-radius:6px;-webkit-border-top-left-radius:6px;border-top-left-radius:6px;-moz-border-radius-bottomleft:6px;-moz-border-radius-topleft:6px}.btn-group>.btn.large:last-child,.btn-group>.large.dropdown-toggle{-webkit-border-top-right-radius:6px;border-top-right-radius:6px;-webkit-border-bottom-right-radius:6px;border-bottom-right-radius:6px;-moz-border-radius-topright:6px;-moz-border-radius-bottomright:6px}.btn-group>.btn:hover,.btn-group>.btn:fo
cus,.btn-group>.btn:active,.btn-group>.btn.active{z-index:2}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{*padding-top:5px;padding-right:8px;*padding-bottom:5px;padding-left:8px;-webkit-box-shadow:inset 1px 0 0 rgba(255,255,255,0.125),inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 1px 0 0 rgba(255,255,255,0.125),inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 1px 0 0 rgba(255,255,255,0.125),inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05)}.btn-group>.btn-mini+.dropdown-toggle{*padding-top:2px;padding-right:5px;*padding-bottom:2px;padding-left:5px}.btn-group>.btn-small+.dropdown-toggle{*padding-top:5px;*padding-bottom:4px}.btn-group>.btn-large+.dropdown-toggle{*padding-top:7px;padding-right:12px;*padding-bottom:7px;padding-left:12px}.btn-group.open .dropdown-toggle{background-image:none;-webkit-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05)}.btn-group.open .btn.dropdown-toggle{background-color:#e6e6e6}.btn-group.open .btn-primary.dropdown-toggle{background-color:#04c}.btn-group.open .btn-warning.dropdown-toggle{background-color:#f89406}.btn-group.open .btn-danger.dropdown-toggle{background-color:#bd362f}.btn-group.open .btn-success.dropdown-toggle{background-color:#51a351}.btn-group.open .btn-info.dropdown-toggle{background-color:#2f96b4}.btn-group.open .btn-inverse.dropdown-toggle{background-color:#222}.btn .caret{margin-top:8px;margin-left:0}.btn-large .caret{margin-top:6px}.btn-large .caret{border-top-width:5px;border-right-width:5px;border-left-width:5px}.btn-mini .caret,.btn-small .caret{margin-top:8px}.dropup .btn-large .caret{border-bottom-width:5px}.btn-primary .caret,.btn-warning .caret,.btn-danger .caret,.btn-info .caret,.btn-success .caret,.btn-inverse .caret{border-top-color:#fff;border-bottom-color:#fff}.btn-group-vertical{display:inline-block;*display:inline;*zoom:1}.btn-group-vertical>.btn{display:block;float:none;max-width:100%;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.btn-group-vertical>.btn+.btn{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:first-child{-webkit-border-radius:4px 4px 0 0;-moz-border-radius:4px 4px 0 0;border-radius:4px 4px 0 0}.btn-group-vertical>.btn:last-child{-webkit-border-radius:0 0 4px 4px;-moz-border-radius:0 0 4px 4px;border-radius:0 0 4px 4px}.btn-group-vertical>.btn-large:first-child{-webkit-border-radius:6px 6px 0 0;-moz-border-radius:6px 6px 0 0;border-radius:6px 6px 0 0}.btn-group-vertical>.btn-large:last-child{-webkit-border-radius:0 0 6px 6px;-moz-border-radius:0 0 6px 6px;border-radius:0 0 6px 6px}.alert{padding:8px 35px 8px 14px;margin-bottom:20px;text-shadow:0 1px 0 rgba(255,255,255,0.5);background-color:#fcf8e3;border:1px solid #fbeed5;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.alert,.alert h4{color:#c09853}.alert h4{margin:0}.alert .close{position:relative;top:-2px;right:-21px;line-height:20px}.alert-success{color:#468847;background-color:#dff0d8;border-color:#d6e9c6}.alert-success h4{color:#468847}.alert-danger,.alert-error{color:#b94a48;background-color:#f2dede;border-color:#eed3d7}.alert-danger h4,.alert-error h4{color:#b94a48}.alert-info{color:#3a87ad;background-color:#d9edf7;border-color:#bce8f1}.alert-info 
h4{color:#3a87ad}.alert-block{padding-top:14px;padding-bottom:14px}.alert-block>p,.alert-block>ul{margin-bottom:0}.alert-block p+p{margin-top:5px}.nav{margin-bottom:20px;margin-left:0;list-style:none}.nav>li>a{display:block}.nav>li>a:hover,.nav>li>a:focus{text-decoration:none;background-color:#eee}.nav>li>a>img{max-width:none}.nav>.pull-right{float:right}.nav-header{display:block;padding:3px 15px;font-size:11px;font-weight:bold;line-height:20px;color:#999;text-shadow:0 1px 0 rgba(255,255,255,0.5);text-transform:uppercase}.nav li+.nav-header{margin-top:9px}.nav-list{padding-right:15px;padding-left:15px;margin-bottom:0}.nav-list>li>a,.nav-list .nav-header{margin-right:-15px;margin-left:-15px;text-shadow:0 1px 0 rgba(255,255,255,0.5)}.nav-list>li>a{padding:3px 15px}.nav-list>.active>a,.nav-list>.active>a:hover,.nav-list>.active>a:focus{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.2);background-color:#08c}.nav-list [class^="icon-"],.nav-list [class*=" icon-"]{margin-right:2px}.nav-list .divider{*width:100%;height:1px;margin:9px 1px;*margin:-5px 0 5px;overflow:hidden;background-color:#e5e5e5;border-bottom:1px solid #fff}.nav-tabs,.nav-pills{*zoom:1}.nav-tabs:before,.nav-pills:before,.nav-tabs:after,.nav-pills:after{display:table;line-height:0;content:""}.nav-tabs:after,.nav-pills:after{clear:both}.nav-tabs>li,.nav-pills>li{float:left}.nav-tabs>li>a,.nav-pills>li>a{padding-right:12px;padding-left:12px;margin-right:2px;line-height:14px}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{margin-bottom:-1px}.nav-tabs>li>a{padding-top:8px;padding-bottom:8px;line-height:20px;border:1px solid transparent;-webkit-border-radius:4px 4px 0 0;-moz-border-radius:4px 4px 0 0;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover,.nav-tabs>li>a:focus{border-color:#eee #eee #ddd}.nav-tabs>.active>a,.nav-tabs>.active>a:hover,.nav-tabs>.active>a:focus{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-pills>li>a{padding-top:8px;padding-bottom:8px;margin-top:2px;margin-bottom:2px;-webkit-border-radius:5px;-moz-border-radius:5px;border-radius:5px}.nav-pills>.active>a,.nav-pills>.active>a:hover,.nav-pills>.active>a:focus{color:#fff;background-color:#08c}.nav-stacked>li{float:none}.nav-stacked>li>a{margin-right:0}.nav-tabs.nav-stacked{border-bottom:0}.nav-tabs.nav-stacked>li>a{border:1px solid #ddd;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.nav-tabs.nav-stacked>li:first-child>a{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-topright:4px;-moz-border-radius-topleft:4px}.nav-tabs.nav-stacked>li:last-child>a{-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-moz-border-radius-bottomright:4px;-moz-border-radius-bottomleft:4px}.nav-tabs.nav-stacked>li>a:hover,.nav-tabs.nav-stacked>li>a:focus{z-index:2;border-color:#ddd}.nav-pills.nav-stacked>li>a{margin-bottom:3px}.nav-pills.nav-stacked>li:last-child>a{margin-bottom:1px}.nav-tabs .dropdown-menu{-webkit-border-radius:0 0 6px 6px;-moz-border-radius:0 0 6px 6px;border-radius:0 0 6px 6px}.nav-pills .dropdown-menu{-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.nav .dropdown-toggle .caret{margin-top:6px;border-top-color:#08c;border-bottom-color:#08c}.nav .dropdown-toggle:hover .caret,.nav .dropdown-toggle:focus .caret{border-top-color:#005580;border-bottom-color:#005580}.nav-tabs .dropdown-toggle 
.caret{margin-top:8px}.nav .active .dropdown-toggle .caret{border-top-color:#fff;border-bottom-color:#fff}.nav-tabs .active .dropdown-toggle .caret{border-top-color:#555;border-bottom-color:#555}.nav>.dropdown.active>a:hover,.nav>.dropdown.active>a:focus{cursor:pointer}.nav-tabs .open .dropdown-toggle,.nav-pills .open .dropdown-toggle,.nav>li.dropdown.open.active>a:hover,.nav>li.dropdown.open.active>a:focus{color:#fff;background-color:#999;border-color:#999}.nav li.dropdown.open .caret,.nav li.dropdown.open.active .caret,.nav li.dropdown.open a:hover .caret,.nav li.dropdown.open a:focus .caret{border-top-color:#fff;border-bottom-color:#fff;opacity:1;filter:alpha(opacity=100)}.tabs-stacked .open>a:hover,.tabs-stacked .open>a:focus{border-color:#999}.tabbable{*zoom:1}.tabbable:before,.tabbable:after{display:table;line-height:0;content:""}.tabbable:after{clear:both}.tab-content{overflow:auto}.tabs-below>.nav-tabs,.tabs-right>.nav-tabs,.tabs-left>.nav-tabs{border-bottom:0}.tab-content>.tab-pane,.pill-content>.pill-pane{display:none}.tab-content>.active,.pill-content>.active{display:block}.tabs-below>.nav-tabs{border-top:1px solid #ddd}.tabs-below>.nav-tabs>li{margin-top:-1px;margin-bottom:0}.tabs-below>.nav-tabs>li>a{-webkit-border-radius:0 0 4px 4px;-moz-border-radius:0 0 4px 4px;border-radius:0 0 4px 4px}.tabs-below>.nav-tabs>li>a:hover,.tabs-below>.nav-tabs>li>a:focus{border-top-color:#ddd;border-bottom-color:transparent}.tabs-below>.nav-tabs>.active>a,.tabs-below>.nav-tabs>.active>a:hover,.tabs-below>.nav-tabs>.active>a:focus{border-color:transparent #ddd #ddd #ddd}.tabs-left>.nav-tabs>li,.tabs-right>.nav-tabs>li{float:none}.tabs-left>.nav-tabs>li>a,.tabs-right>.nav-tabs>li>a{min-width:74px;margin-right:0;margin-bottom:3px}.tabs-left>.nav-tabs{float:left;margin-right:19px;border-right:1px solid #ddd}.tabs-left>.nav-tabs>li>a{margin-right:-1px;-webkit-border-radius:4px 0 0 4px;-moz-border-radius:4px 0 0 4px;border-radius:4px 0 0 4px}.tabs-left>.nav-tabs>li>a:hover,.tabs-left>.nav-tabs>li>a:focus{border-color:#eee #ddd #eee #eee}.tabs-left>.nav-tabs .active>a,.tabs-left>.nav-tabs .active>a:hover,.tabs-left>.nav-tabs .active>a:focus{border-color:#ddd transparent #ddd #ddd;*border-right-color:#fff}.tabs-right>.nav-tabs{float:right;margin-left:19px;border-left:1px solid #ddd}.tabs-right>.nav-tabs>li>a{margin-left:-1px;-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.tabs-right>.nav-tabs>li>a:hover,.tabs-right>.nav-tabs>li>a:focus{border-color:#eee #eee #eee #ddd}.tabs-right>.nav-tabs .active>a,.tabs-right>.nav-tabs .active>a:hover,.tabs-right>.nav-tabs .active>a:focus{border-color:#ddd #ddd #ddd transparent;*border-left-color:#fff}.nav>.disabled>a{color:#999}.nav>.disabled>a:hover,.nav>.disabled>a:focus{text-decoration:none;cursor:default;background-color:transparent}.navbar{*position:relative;*z-index:2;margin-bottom:20px;overflow:visible}.navbar-inner{min-height:40px;padding-right:20px;padding-left:20px;background-color:#fafafa;background-image:-moz-linear-gradient(top,#fff,#f2f2f2);background-image:-webkit-gradient(linear,0 0,0 100%,from(#fff),to(#f2f2f2));background-image:-webkit-linear-gradient(top,#fff,#f2f2f2);background-image:-o-linear-gradient(top,#fff,#f2f2f2);background-image:linear-gradient(to bottom,#fff,#f2f2f2);background-repeat:repeat-x;border:1px solid 
#d4d4d4;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff',endColorstr='#fff2f2f2',GradientType=0);*zoom:1;-webkit-box-shadow:0 1px 4px rgba(0,0,0,0.065);-moz-box-shadow:0 1px 4px rgba(0,0,0,0.065);box-shadow:0 1px 4px rgba(0,0,0,0.065)}.navbar-inner:before,.navbar-inner:after{display:table;line-height:0;content:""}.navbar-inner:after{clear:both}.navbar .container{width:auto}.nav-collapse.collapse{height:auto;overflow:visible}.navbar .brand{display:block;float:left;padding:10px 20px 10px;margin-left:-20px;font-size:20px;font-weight:200;color:#777;text-shadow:0 1px 0 #fff}.navbar .brand:hover,.navbar .brand:focus{text-decoration:none}.navbar-text{margin-bottom:0;line-height:40px;color:#777}.navbar-link{color:#777}.navbar-link:hover,.navbar-link:focus{color:#333}.navbar .divider-vertical{height:40px;margin:0 9px;border-right:1px solid #fff;border-left:1px solid #f2f2f2}.navbar .btn,.navbar .btn-group{margin-top:5px}.navbar .btn-group .btn,.navbar .input-prepend .btn,.navbar .input-append .btn,.navbar .input-prepend .btn-group,.navbar .input-append .btn-group{margin-top:0}.navbar-form{margin-bottom:0;*zoom:1}.navbar-form:before,.navbar-form:after{display:table;line-height:0;content:""}.navbar-form:after{clear:both}.navbar-form input,.navbar-form select,.navbar-form .radio,.navbar-form .checkbox{margin-top:5px}.navbar-form input,.navbar-form select,.navbar-form .btn{display:inline-block;margin-bottom:0}.navbar-form input[type="image"],.navbar-form input[type="checkbox"],.navbar-form input[type="radio"]{margin-top:3px}.navbar-form .input-append,.navbar-form .input-prepend{margin-top:5px;white-space:nowrap}.navbar-form .input-append input,.navbar-form .input-prepend input{margin-top:0}.navbar-search{position:relative;float:left;margin-top:5px;margin-bottom:0}.navbar-search .search-query{padding:4px 14px;margin-bottom:0;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:13px;font-weight:normal;line-height:1;-webkit-border-radius:15px;-moz-border-radius:15px;border-radius:15px}.navbar-static-top{position:static;margin-bottom:0}.navbar-static-top .navbar-inner{-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.navbar-fixed-top,.navbar-fixed-bottom{position:fixed;right:0;left:0;z-index:1030;margin-bottom:0}.navbar-fixed-top .navbar-inner,.navbar-static-top .navbar-inner{border-width:0 0 1px}.navbar-fixed-bottom .navbar-inner{border-width:1px 0 0}.navbar-fixed-top .navbar-inner,.navbar-fixed-bottom .navbar-inner{padding-right:0;padding-left:0;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom .container{width:940px}.navbar-fixed-top{top:0}.navbar-fixed-top .navbar-inner,.navbar-static-top .navbar-inner{-webkit-box-shadow:0 1px 10px rgba(0,0,0,0.1);-moz-box-shadow:0 1px 10px rgba(0,0,0,0.1);box-shadow:0 1px 10px rgba(0,0,0,0.1)}.navbar-fixed-bottom{bottom:0}.navbar-fixed-bottom .navbar-inner{-webkit-box-shadow:0 -1px 10px rgba(0,0,0,0.1);-moz-box-shadow:0 -1px 10px rgba(0,0,0,0.1);box-shadow:0 -1px 10px rgba(0,0,0,0.1)}.navbar .nav{position:relative;left:0;display:block;float:left;margin:0 10px 0 0}.navbar .nav.pull-right{float:right;margin-right:0}.navbar .nav>li{float:left}.navbar .nav>li>a{float:none;padding:10px 15px 10px;color:#777;text-decoration:none;text-shadow:0 1px 0 #fff}.navbar .nav .dropdown-toggle .caret{margin-top:8px}.navbar .nav>li>a:focus,.navbar 
.nav>li>a:hover{color:#333;text-decoration:none;background-color:transparent}.navbar .nav>.active>a,.navbar .nav>.active>a:hover,.navbar .nav>.active>a:focus{color:#555;text-decoration:none;background-color:#e5e5e5;-webkit-box-shadow:inset 0 3px 8px rgba(0,0,0,0.125);-moz-box-shadow:inset 0 3px 8px rgba(0,0,0,0.125);box-shadow:inset 0 3px 8px rgba(0,0,0,0.125)}.navbar .btn-navbar{display:none;float:right;padding:7px 10px;margin-right:5px;margin-left:5px;color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#ededed;*background-color:#e5e5e5;background-image:-moz-linear-gradient(top,#f2f2f2,#e5e5e5);background-image:-webkit-gradient(linear,0 0,0 100%,from(#f2f2f2),to(#e5e5e5));background-image:-webkit-linear-gradient(top,#f2f2f2,#e5e5e5);background-image:-o-linear-gradient(top,#f2f2f2,#e5e5e5);background-image:linear-gradient(to bottom,#f2f2f2,#e5e5e5);background-repeat:repeat-x;border-color:#e5e5e5 #e5e5e5 #bfbfbf;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2f2f2',endColorstr='#ffe5e5e5',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.075);-moz-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.075);box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.075)}.navbar .btn-navbar:hover,.navbar .btn-navbar:focus,.navbar .btn-navbar:active,.navbar .btn-navbar.active,.navbar .btn-navbar.disabled,.navbar .btn-navbar[disabled]{color:#fff;background-color:#e5e5e5;*background-color:#d9d9d9}.navbar .btn-navbar:active,.navbar .btn-navbar.active{background-color:#ccc \9}.navbar .btn-navbar .icon-bar{display:block;width:18px;height:2px;background-color:#f5f5f5;-webkit-border-radius:1px;-moz-border-radius:1px;border-radius:1px;-webkit-box-shadow:0 1px 0 rgba(0,0,0,0.25);-moz-box-shadow:0 1px 0 rgba(0,0,0,0.25);box-shadow:0 1px 0 rgba(0,0,0,0.25)}.btn-navbar .icon-bar+.icon-bar{margin-top:3px}.navbar .nav>li>.dropdown-menu:before{position:absolute;top:-7px;left:9px;display:inline-block;border-right:7px solid transparent;border-bottom:7px solid #ccc;border-left:7px solid transparent;border-bottom-color:rgba(0,0,0,0.2);content:''}.navbar .nav>li>.dropdown-menu:after{position:absolute;top:-6px;left:10px;display:inline-block;border-right:6px solid transparent;border-bottom:6px solid #fff;border-left:6px solid transparent;content:''}.navbar-fixed-bottom .nav>li>.dropdown-menu:before{top:auto;bottom:-7px;border-top:7px solid #ccc;border-bottom:0;border-top-color:rgba(0,0,0,0.2)}.navbar-fixed-bottom .nav>li>.dropdown-menu:after{top:auto;bottom:-6px;border-top:6px solid #fff;border-bottom:0}.navbar .nav li.dropdown>a:hover .caret,.navbar .nav li.dropdown>a:focus .caret{border-top-color:#333;border-bottom-color:#333}.navbar .nav li.dropdown.open>.dropdown-toggle,.navbar .nav li.dropdown.active>.dropdown-toggle,.navbar .nav li.dropdown.open.active>.dropdown-toggle{color:#555;background-color:#e5e5e5}.navbar .nav li.dropdown>.dropdown-toggle .caret{border-top-color:#777;border-bottom-color:#777}.navbar .nav li.dropdown.open>.dropdown-toggle .caret,.navbar .nav li.dropdown.active>.dropdown-toggle .caret,.navbar .nav li.dropdown.open.active>.dropdown-toggle .caret{border-top-color:#555;border-bottom-color:#555}.navbar .pull-right>li>.dropdown-menu,.navbar .nav>li>.dropdown-menu.pull-right{right:0;left:auto}.navbar .pull-right>li>.dropdown-menu:before,.navbar 
.nav>li>.dropdown-menu.pull-right:before{right:12px;left:auto}.navbar .pull-right>li>.dropdown-menu:after,.navbar .nav>li>.dropdown-menu.pull-right:after{right:13px;left:auto}.navbar .pull-right>li>.dropdown-menu .dropdown-menu,.navbar .nav>li>.dropdown-menu.pull-right .dropdown-menu{right:100%;left:auto;margin-right:-1px;margin-left:0;-webkit-border-radius:6px 0 6px 6px;-moz-border-radius:6px 0 6px 6px;border-radius:6px 0 6px 6px}.navbar-inverse .navbar-inner{background-color:#1b1b1b;background-image:-moz-linear-gradient(top,#222,#111);background-image:-webkit-gradient(linear,0 0,0 100%,from(#222),to(#111));background-image:-webkit-linear-gradient(top,#222,#111);background-image:-o-linear-gradient(top,#222,#111);background-image:linear-gradient(to bottom,#222,#111);background-repeat:repeat-x;border-color:#252525;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff222222',endColorstr='#ff111111',GradientType=0)}.navbar-inverse .brand,.navbar-inverse .nav>li>a{color:#999;text-shadow:0 -1px 0 rgba(0,0,0,0.25)}.navbar-inverse .brand:hover,.navbar-inverse .nav>li>a:hover,.navbar-inverse .brand:focus,.navbar-inverse .nav>li>a:focus{color:#fff}.navbar-inverse .brand{color:#999}.navbar-inverse .navbar-text{color:#999}.navbar-inverse .nav>li>a:focus,.navbar-inverse .nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .nav .active>a,.navbar-inverse .nav .active>a:hover,.navbar-inverse .nav .active>a:focus{color:#fff;background-color:#111}.navbar-inverse .navbar-link{color:#999}.navbar-inverse .navbar-link:hover,.navbar-inverse .navbar-link:focus{color:#fff}.navbar-inverse .divider-vertical{border-right-color:#222;border-left-color:#111}.navbar-inverse .nav li.dropdown.open>.dropdown-toggle,.navbar-inverse .nav li.dropdown.active>.dropdown-toggle,.navbar-inverse .nav li.dropdown.open.active>.dropdown-toggle{color:#fff;background-color:#111}.navbar-inverse .nav li.dropdown>a:hover .caret,.navbar-inverse .nav li.dropdown>a:focus .caret{border-top-color:#fff;border-bottom-color:#fff}.navbar-inverse .nav li.dropdown>.dropdown-toggle .caret{border-top-color:#999;border-bottom-color:#999}.navbar-inverse .nav li.dropdown.open>.dropdown-toggle .caret,.navbar-inverse .nav li.dropdown.active>.dropdown-toggle .caret,.navbar-inverse .nav li.dropdown.open.active>.dropdown-toggle .caret{border-top-color:#fff;border-bottom-color:#fff}.navbar-inverse .navbar-search .search-query{color:#fff;background-color:#515151;border-color:#111;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1),0 1px 0 rgba(255,255,255,0.15);-moz-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1),0 1px 0 rgba(255,255,255,0.15);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1),0 1px 0 rgba(255,255,255,0.15);-webkit-transition:none;-moz-transition:none;-o-transition:none;transition:none}.navbar-inverse .navbar-search .search-query:-moz-placeholder{color:#ccc}.navbar-inverse .navbar-search .search-query:-ms-input-placeholder{color:#ccc}.navbar-inverse .navbar-search .search-query::-webkit-input-placeholder{color:#ccc}.navbar-inverse .navbar-search .search-query:focus,.navbar-inverse .navbar-search .search-query.focused{padding:5px 15px;color:#333;text-shadow:0 1px 0 #fff;background-color:#fff;border:0;outline:0;-webkit-box-shadow:0 0 3px rgba(0,0,0,0.15);-moz-box-shadow:0 0 3px rgba(0,0,0,0.15);box-shadow:0 0 3px rgba(0,0,0,0.15)}.navbar-inverse .btn-navbar{color:#fff;text-shadow:0 -1px 0 
rgba(0,0,0,0.25);background-color:#0e0e0e;*background-color:#040404;background-image:-moz-linear-gradient(top,#151515,#040404);background-image:-webkit-gradient(linear,0 0,0 100%,from(#151515),to(#040404));background-image:-webkit-linear-gradient(top,#151515,#040404);background-image:-o-linear-gradient(top,#151515,#040404);background-image:linear-gradient(to bottom,#151515,#040404);background-repeat:repeat-x;border-color:#040404 #040404 #000;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff151515',endColorstr='#ff040404',GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.navbar-inverse .btn-navbar:hover,.navbar-inverse .btn-navbar:focus,.navbar-inverse .btn-navbar:active,.navbar-inverse .btn-navbar.active,.navbar-inverse .btn-navbar.disabled,.navbar-inverse .btn-navbar[disabled]{color:#fff;background-color:#040404;*background-color:#000}.navbar-inverse .btn-navbar:active,.navbar-inverse .btn-navbar.active{background-color:#000 \9}.breadcrumb{padding:8px 15px;margin:0 0 20px;list-style:none;background-color:#f5f5f5;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.breadcrumb>li{display:inline-block;*display:inline;text-shadow:0 1px 0 #fff;*zoom:1}.breadcrumb>li>.divider{padding:0 5px;color:#ccc}.breadcrumb>.active{color:#999}.pagination{margin:20px 0}.pagination ul{display:inline-block;*display:inline;margin-bottom:0;margin-left:0;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;*zoom:1;-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:0 1px 2px rgba(0,0,0,0.05);box-shadow:0 1px 2px rgba(0,0,0,0.05)}.pagination ul>li{display:inline}.pagination ul>li>a,.pagination ul>li>span{float:left;padding:4px 12px;line-height:20px;text-decoration:none;background-color:#fff;border:1px solid #ddd;border-left-width:0}.pagination ul>li>a:hover,.pagination ul>li>a:focus,.pagination ul>.active>a,.pagination ul>.active>span{background-color:#f5f5f5}.pagination ul>.active>a,.pagination ul>.active>span{color:#999;cursor:default}.pagination ul>.disabled>span,.pagination ul>.disabled>a,.pagination ul>.disabled>a:hover,.pagination ul>.disabled>a:focus{color:#999;cursor:default;background-color:transparent}.pagination ul>li:first-child>a,.pagination ul>li:first-child>span{border-left-width:1px;-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-bottomleft:4px;-moz-border-radius-topleft:4px}.pagination ul>li:last-child>a,.pagination ul>li:last-child>span{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-moz-border-radius-topright:4px;-moz-border-radius-bottomright:4px}.pagination-centered{text-align:center}.pagination-right{text-align:right}.pagination-large ul>li>a,.pagination-large ul>li>span{padding:11px 19px;font-size:17.5px}.pagination-large ul>li:first-child>a,.pagination-large ul>li:first-child>span{-webkit-border-bottom-left-radius:6px;border-bottom-left-radius:6px;-webkit-border-top-left-radius:6px;border-top-left-radius:6px;-moz-border-radius-bottomleft:6px;-moz-border-radius-topleft:6px}.pagination-large ul>li:last-child>a,.pagination-large 
ul>li:last-child>span{-webkit-border-top-right-radius:6px;border-top-right-radius:6px;-webkit-border-bottom-right-radius:6px;border-bottom-right-radius:6px;-moz-border-radius-topright:6px;-moz-border-radius-bottomright:6px}.pagination-mini ul>li:first-child>a,.pagination-small ul>li:first-child>a,.pagination-mini ul>li:first-child>span,.pagination-small ul>li:first-child>span{-webkit-border-bottom-left-radius:3px;border-bottom-left-radius:3px;-webkit-border-top-left-radius:3px;border-top-left-radius:3px;-moz-border-radius-bottomleft:3px;-moz-border-radius-topleft:3px}.pagination-mini ul>li:last-child>a,.pagination-small ul>li:last-child>a,.pagination-mini ul>li:last-child>span,.pagination-small ul>li:last-child>span{-webkit-border-top-right-radius:3px;border-top-right-radius:3px;-webkit-border-bottom-right-radius:3px;border-bottom-right-radius:3px;-moz-border-radius-topright:3px;-moz-border-radius-bottomright:3px}.pagination-small ul>li>a,.pagination-small ul>li>span{padding:2px 10px;font-size:11.9px}.pagination-mini ul>li>a,.pagination-mini ul>li>span{padding:0 6px;font-size:10.5px}.pager{margin:20px 0;text-align:center;list-style:none;*zoom:1}.pager:before,.pager:after{display:table;line-height:0;content:""}.pager:after{clear:both}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;-webkit-border-radius:15px;-moz-border-radius:15px;border-radius:15px}.pager li>a:hover,.pager li>a:focus{text-decoration:none;background-color:#f5f5f5}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:hover,.pager .disabled>a:focus,.pager .disabled>span{color:#999;cursor:default;background-color:#fff}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{opacity:0}.modal-backdrop,.modal-backdrop.fade.in{opacity:.8;filter:alpha(opacity=80)}.modal{position:fixed;top:10%;left:50%;z-index:1050;width:560px;margin-left:-280px;background-color:#fff;border:1px solid #999;border:1px solid rgba(0,0,0,0.3);*border:1px solid #999;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;outline:0;-webkit-box-shadow:0 3px 7px rgba(0,0,0,0.3);-moz-box-shadow:0 3px 7px rgba(0,0,0,0.3);box-shadow:0 3px 7px rgba(0,0,0,0.3);-webkit-background-clip:padding-box;-moz-background-clip:padding-box;background-clip:padding-box}.modal.fade{top:-25%;-webkit-transition:opacity .3s linear,top .3s ease-out;-moz-transition:opacity .3s linear,top .3s ease-out;-o-transition:opacity .3s linear,top .3s ease-out;transition:opacity .3s linear,top .3s ease-out}.modal.fade.in{top:10%}.modal-header{padding:9px 15px;border-bottom:1px solid #eee}.modal-header .close{margin-top:2px}.modal-header h3{margin:0;line-height:30px}.modal-body{position:relative;max-height:400px;padding:15px;overflow-y:auto}.modal-form{margin-bottom:0}.modal-footer{padding:14px 15px 15px;margin-bottom:0;text-align:right;background-color:#f5f5f5;border-top:1px solid #ddd;-webkit-border-radius:0 0 6px 6px;-moz-border-radius:0 0 6px 6px;border-radius:0 0 6px 6px;*zoom:1;-webkit-box-shadow:inset 0 1px 0 #fff;-moz-box-shadow:inset 0 1px 0 #fff;box-shadow:inset 0 1px 0 #fff}.modal-footer:before,.modal-footer:after{display:table;line-height:0;content:""}.modal-footer:after{clear:both}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer 
.btn-block+.btn-block{margin-left:0}.tooltip{position:absolute;z-index:1030;display:block;font-size:11px;line-height:1.4;opacity:0;filter:alpha(opacity=0);visibility:visible}.tooltip.in{opacity:.8;filter:alpha(opacity=80)}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-top-color:#000;border-width:5px 5px 0}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-right-color:#000;border-width:5px 5px 5px 0}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-left-color:#000;border-width:5px 0 5px 5px}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-bottom-color:#000;border-width:0 5px 5px}.popover{position:absolute;top:0;left:0;z-index:1010;display:none;max-width:276px;padding:1px;text-align:left;white-space:normal;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,0.2);-moz-box-shadow:0 5px 10px rgba(0,0,0,0.2);box-shadow:0 5px 10px rgba(0,0,0,0.2);-webkit-background-clip:padding-box;-moz-background-clip:padding;background-clip:padding-box}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;font-weight:normal;line-height:18px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;-webkit-border-radius:5px 5px 0 0;-moz-border-radius:5px 5px 0 0;border-radius:5px 5px 0 0}.popover-title:empty{display:none}.popover-content{padding:9px 14px}.popover .arrow,.popover .arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover .arrow{border-width:11px}.popover .arrow:after{border-width:10px;content:""}.popover.top .arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,0.25);border-bottom-width:0}.popover.top .arrow:after{bottom:1px;margin-left:-10px;border-top-color:#fff;border-bottom-width:0}.popover.right .arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,0.25);border-left-width:0}.popover.right .arrow:after{bottom:-10px;left:1px;border-right-color:#fff;border-left-width:0}.popover.bottom .arrow{top:-11px;left:50%;margin-left:-11px;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,0.25);border-top-width:0}.popover.bottom .arrow:after{top:1px;margin-left:-10px;border-bottom-color:#fff;border-top-width:0}.popover.left .arrow{top:50%;right:-11px;margin-top:-11px;border-left-color:#999;border-left-color:rgba(0,0,0,0.25);border-right-width:0}.popover.left .arrow:after{right:1px;bottom:-10px;border-left-color:#fff;border-right-width:0}.thumbnails{margin-left:-20px;list-style:none;*zoom:1}.thumbnails:before,.thumbnails:after{display:table;line-height:0;content:""}.thumbnails:after{clear:both}.row-fluid .thumbnails{margin-left:0}.thumbnails>li{float:left;margin-bottom:20px;margin-left:20px}.thumbnail{display:block;padding:4px;line-height:20px;border:1px solid 
#ddd;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;-webkit-box-shadow:0 1px 3px rgba(0,0,0,0.055);-moz-box-shadow:0 1px 3px rgba(0,0,0,0.055);box-shadow:0 1px 3px rgba(0,0,0,0.055);-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}a.thumbnail:hover,a.thumbnail:focus{border-color:#08c;-webkit-box-shadow:0 1px 4px rgba(0,105,214,0.25);-moz-box-shadow:0 1px 4px rgba(0,105,214,0.25);box-shadow:0 1px 4px rgba(0,105,214,0.25)}.thumbnail>img{display:block;max-width:100%;margin-right:auto;margin-left:auto}.thumbnail .caption{padding:9px;color:#555}.media,.media-body{overflow:hidden;*overflow:visible;zoom:1}.media,.media .media{margin-top:15px}.media:first-child{margin-top:0}.media-object{display:block}.media-heading{margin:0 0 5px}.media>.pull-left{margin-right:10px}.media>.pull-right{margin-left:10px}.media-list{margin-left:0;list-style:none}.label,.badge{display:inline-block;padding:2px 4px;font-size:11.844px;font-weight:bold;line-height:14px;color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);white-space:nowrap;vertical-align:baseline;background-color:#999}.label{-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.badge{padding-right:9px;padding-left:9px;-webkit-border-radius:9px;-moz-border-radius:9px;border-radius:9px}.label:empty,.badge:empty{display:none}a.label:hover,a.label:focus,a.badge:hover,a.badge:focus{color:#fff;text-decoration:none;cursor:pointer}.label-important,.badge-important{background-color:#b94a48}.label-important[href],.badge-important[href]{background-color:#953b39}.label-warning,.badge-warning{background-color:#f89406}.label-warning[href],.badge-warning[href]{background-color:#c67605}.label-success,.badge-success{background-color:#468847}.label-success[href],.badge-success[href]{background-color:#356635}.label-info,.badge-info{background-color:#3a87ad}.label-info[href],.badge-info[href]{background-color:#2d6987}.label-inverse,.badge-inverse{background-color:#333}.label-inverse[href],.badge-inverse[href]{background-color:#1a1a1a}.btn .label,.btn .badge{position:relative;top:-1px}.btn-mini .label,.btn-mini .badge{top:0}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-moz-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-ms-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:0 0}to{background-position:40px 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f7f7f7;background-image:-moz-linear-gradient(top,#f5f5f5,#f9f9f9);background-image:-webkit-gradient(linear,0 0,0 100%,from(#f5f5f5),to(#f9f9f9));background-image:-webkit-linear-gradient(top,#f5f5f5,#f9f9f9);background-image:-o-linear-gradient(top,#f5f5f5,#f9f9f9);background-image:linear-gradient(to bottom,#f5f5f5,#f9f9f9);background-repeat:repeat-x;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5',endColorstr='#fff9f9f9',GradientType=0);-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);-moz-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1)}.progress .bar{float:left;width:0;height:100%;font-size:12px;color:#fff;text-align:center;text-shadow:0 -1px 0 
rgba(0,0,0,0.25);background-color:#0e90d2;background-image:-moz-linear-gradient(top,#149bdf,#0480be);background-image:-webkit-gradient(linear,0 0,0 100%,from(#149bdf),to(#0480be));background-image:-webkit-linear-gradient(top,#149bdf,#0480be);background-image:-o-linear-gradient(top,#149bdf,#0480be);background-image:linear-gradient(to bottom,#149bdf,#0480be);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff149bdf',endColorstr='#ff0480be',GradientType=0);-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-moz-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;-webkit-transition:width .6s ease;-moz-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress .bar+.bar{-webkit-box-shadow:inset 1px 0 0 rgba(0,0,0,0.15),inset 0 -1px 0 rgba(0,0,0,0.15);-moz-box-shadow:inset 1px 0 0 rgba(0,0,0,0.15),inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 1px 0 0 rgba(0,0,0,0.15),inset 0 -1px 0 rgba(0,0,0,0.15)}.progress-striped .bar{background-color:#149bdf;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;-moz-background-size:40px 40px;-o-background-size:40px 40px;background-size:40px 40px}.progress.active .bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-moz-animation:progress-bar-stripes 2s linear infinite;-ms-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-danger .bar,.progress .bar-danger{background-color:#dd514c;background-image:-moz-linear-gradient(top,#ee5f5b,#c43c35);background-image:-webkit-gradient(linear,0 0,0 100%,from(#ee5f5b),to(#c43c35));background-image:-webkit-linear-gradient(top,#ee5f5b,#c43c35);background-image:-o-linear-gradient(top,#ee5f5b,#c43c35);background-image:linear-gradient(to bottom,#ee5f5b,#c43c35);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffee5f5b',endColorstr='#ffc43c35',GradientType=0)}.progress-danger.progress-striped .bar,.progress-striped .bar-danger{background-color:#ee5f5b;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 
25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-success .bar,.progress .bar-success{background-color:#5eb95e;background-image:-moz-linear-gradient(top,#62c462,#57a957);background-image:-webkit-gradient(linear,0 0,0 100%,from(#62c462),to(#57a957));background-image:-webkit-linear-gradient(top,#62c462,#57a957);background-image:-o-linear-gradient(top,#62c462,#57a957);background-image:linear-gradient(to bottom,#62c462,#57a957);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff62c462',endColorstr='#ff57a957',GradientType=0)}.progress-success.progress-striped .bar,.progress-striped .bar-success{background-color:#62c462;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-info .bar,.progress .bar-info{background-color:#4bb1cf;background-image:-moz-linear-gradient(top,#5bc0de,#339bb9);background-image:-webkit-gradient(linear,0 0,0 100%,from(#5bc0de),to(#339bb9));background-image:-webkit-linear-gradient(top,#5bc0de,#339bb9);background-image:-o-linear-gradient(top,#5bc0de,#339bb9);background-image:linear-gradient(to bottom,#5bc0de,#339bb9);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff339bb9',GradientType=0)}.progress-info.progress-striped .bar,.progress-striped .bar-info{background-color:#5bc0de;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 
75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-warning .bar,.progress .bar-warning{background-color:#faa732;background-image:-moz-linear-gradient(top,#fbb450,#f89406);background-image:-webkit-gradient(linear,0 0,0 100%,from(#fbb450),to(#f89406));background-image:-webkit-linear-gradient(top,#fbb450,#f89406);background-image:-o-linear-gradient(top,#fbb450,#f89406);background-image:linear-gradient(to bottom,#fbb450,#f89406);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffbb450',endColorstr='#fff89406',GradientType=0)}.progress-warning.progress-striped .bar,.progress-striped .bar-warning{background-color:#fbb450;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.accordion{margin-bottom:20px}.accordion-group{margin-bottom:2px;border:1px solid #e5e5e5;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.accordion-heading{border-bottom:0}.accordion-heading .accordion-toggle{display:block;padding:8px 15px}.accordion-toggle{cursor:pointer}.accordion-inner{padding:9px 15px;border-top:1px solid #e5e5e5}.carousel{position:relative;margin-bottom:20px;line-height:1}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-moz-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>img,.carousel-inner>.item>a>img{display:block;line-height:1}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:40%;left:15px;width:40px;height:40px;margin-top:-20px;font-size:60px;font-weight:100;line-height:30px;color:#fff;text-align:center;background:#222;border:3px solid 
#fff;-webkit-border-radius:23px;-moz-border-radius:23px;border-radius:23px;opacity:.5;filter:alpha(opacity=50)}.carousel-control.right{right:15px;left:auto}.carousel-control:hover,.carousel-control:focus{color:#fff;text-decoration:none;opacity:.9;filter:alpha(opacity=90)}.carousel-indicators{position:absolute;top:15px;right:15px;z-index:5;margin:0;list-style:none}.carousel-indicators li{display:block;float:left;width:10px;height:10px;margin-left:5px;text-indent:-999px;background-color:#ccc;background-color:rgba(255,255,255,0.25);border-radius:5px}.carousel-indicators .active{background-color:#fff}.carousel-caption{position:absolute;right:0;bottom:0;left:0;padding:15px;background:#333;background:rgba(0,0,0,0.75)}.carousel-caption h4,.carousel-caption p{line-height:20px;color:#fff}.carousel-caption h4{margin:0 0 5px}.carousel-caption p{margin-bottom:0}.hero-unit{padding:60px;margin-bottom:30px;font-size:18px;font-weight:200;line-height:30px;color:inherit;background-color:#eee;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.hero-unit h1{margin-bottom:0;font-size:60px;line-height:1;letter-spacing:-1px;color:inherit}.hero-unit li{line-height:30px}.pull-right{float:right}.pull-left{float:left}.hide{display:none}.show{display:block}.invisible{visibility:hidden}.affix{position:fixed} diff --git a/docs/theme/docker/static/css/main.css b/docs/theme/docker/static/css/main.css deleted file mode 100755 index ce4ba7b869..0000000000 --- a/docs/theme/docker/static/css/main.css +++ /dev/null @@ -1,477 +0,0 @@ -.debug { - border: 2px dotted red !important; - box-sizing: border-box; - -moz-box-sizing: border-box; -} -body { - min-width: 940px; - font-family: "Cabin", "Helvetica Neue", Helvetica, Arial, sans-serif; -} -p a { - text-decoration: underline; -} -p a.btn { - text-decoration: none; -} -.brand.logo a { - text-decoration: none; -} -.navbar .navbar-inner { - padding-left: 0px; - padding-right: 0px; -} -.navbar .nav li a { - padding: 24.2857142855px 17px 24.2857142855px; - color: #777777; - text-decoration: none; - text-shadow: 0 1px 0 #f2f2f2; -} -.navbar .nav > li { - float: left; -} -.nav-underline { - height: 6px; - background-color: #71afc0; -} -.nav-login li a { - color: white; - padding: 10px 15px 10px; -} -.navbar .brand { - margin-left: 0px; - float: left; - display: block; -} -.navbar-inner { - min-height: 70px; - padding-left: 20px; - padding-right: 20px; - background-color: #ededed; - background-image: -moz-linear-gradient(top, #f2f2f2, #e5e5e5); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#f2f2f2), to(#e5e5e5)); - background-image: -webkit-linear-gradient(top, #f2f2f2, #e5e5e5); - background-image: -o-linear-gradient(top, #f2f2f2, #e5e5e5); - background-image: linear-gradient(to bottom, #f2f2f2, #e5e5e5); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2f2f2', endColorstr='#ffe5e5e5', GradientType=0); - border: 1px solid #c7c7c7; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; - -webkit-box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065); - -moz-box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065); - box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065); -} -.brand-logo a { - color: white; -} -.brand-logo a img { - width: auto; -} -.inline-icon { - margin-bottom: 6px; -} -.row { - margin-top: 15px; - margin-bottom: 15px; -} -div[class*='span'] { - -moz-box-sizing: border-box; - box-sizing: border-box; -} -.box { - padding: 30px; - background-color: white; - margin-top: 8px; -} -.paper { - 
background-color: white; - padding-top: 30px; - padding-bottom: 30px; -} -.copy-headline { - margin-top: 0px; -} -.box h1, -.box h2, -.box h3, -.box h4 { - margin-top: -5px; -} -.nested { - padding: 30px; -} -.box.div { - padding: 30px; -} -span.read-more { - margin-left: 15px; - white-space: nowrap; -} -.forcetopalign { - margin-top: 15px !important; -} -.forcetopmargin { - margin-top: 23px !important; -} -.forceleftalign { - margin-left: 15px !important; -} -.forceleftmargin { - margin-left: 21px !important; -} -.textcenter { - text-align: center; -} -.textright { - text-align: right; -} -.textsmaller { - font-size: 12px; -} -.modal-backdrop { - opacity: 0.4; -} -/* generic page copy styles */ -.copy-headline h1 { - font-size: 21px; -} -/* ======================= - Sticky footer -======================= */ -html, -body { - height: 100%; - /* The html and body elements cannot have any padding or margin. */ - -} -/* Wrapper for page content to push down footer */ -#wrap { - min-height: 100%; - height: auto !important; - height: 100%; - /* Negative indent footer by it's height */ - - margin: 0 auto -280px; -} -/* Set the fixed height of the footer here */ -#push-the-footer, -#footer { - height: 280px; -} -.main-row { - padding-top: 50px; -} -#footer .footer { - margin-top: 160px; -} -#footer .footer .ligaturesymbols { - font-size: 30px; - color: black; -} -#footer .footer .ligaturesymbols a { - color: black; -} -#footer .footer .footerlist h3, -#footer .footer .footerlist h4 { - /* correct the top alignment */ - - margin-top: 0px; -} -.footer-landscape-image { - position: absolute: - bottom: 0; - margin-bottom: 0; - background-image: url('https://www.docker.io/static/img/website-footer_clean.svg'); - background-repeat: repeat-x; - height: 280px; -} -.main-row { - margin-top: 40px; -} -.sidebar { - width: 215px; - float: left; -} -.main-content { - padding: 16px 18px inherit; - margin-left: 230px; - /* space for sidebar */ - -} -/* ======================= - Social footer -======================= */ -.social { - margin-left: 0px; - margin-top: 15px; -} -.social .twitter, -.social .github, -.social .googleplus, -.social .facebook, -.social .slideshare, -.social .linkedin, -.social .flickr, -.social .youtube, -.social .reddit { - background: url("../img/social/docker_social_logos.png") no-repeat transparent; - display: inline-block; - height: 32px; - overflow: hidden; - text-indent: 9999px; - width: 32px; - margin-right: 5px; -} -.social :hover { - -webkit-transform: rotate(-10deg); - -moz-transform: rotate(-10deg); - -o-transform: rotate(-10deg); - -ms-transform: rotate(-10deg); - transform: rotate(-10deg); -} -.social .twitter { - background-position: -160px 0px; -} -.social .reddit { - background-position: -256px 0px; -} -.social .github { - background-position: -64px 0px; -} -.social .googleplus { - background-position: -96px 0px; -} -.social .facebook { - background-position: 0px 0px; -} -.social .slideshare { - background-position: -128px 0px; -} -.social .youtube { - background-position: -192px 0px; -} -.social .flickr { - background-position: -32px 0px; -} -.social .linkedin { - background-position: -224px 0px; -} -form table th { - vertical-align: top; - text-align: right; - white-space: nowrap; -} -form .labeldd label { - font-weight: bold; -} -form .helptext { - font-size: 12px; - margin-top: -4px; - margin-bottom: 10px; -} -form .fielddd input { - width: 250px; -} -form .error { - color: #a30000; -} -div.alert.alert-block { - margin-bottom: 15px; -} -/* ======================= 
======================= - Documentation -========================= ========================= */ -/* ======================= - Styles for the sidebar -========================= */ -.page-title { - background-color: white; - border: 1px solid transparent; - text-align: center; - width: 100%; -} -.page-title h4 { - font-size: 20px; -} -.bs-docs-sidebar { - padding-left: 5px; - max-width: 100%; - box-sizing: border-box; - -moz-box-sizing: border-box; - margin-top: 18px; -} -.bs-docs-sidebar ul { - list-style: none; - margin-left: 0px; -} -.bs-docs-sidebar .toctree-l2 > ul { - width: 100%; -} -.bs-docs-sidebar ul > li.toctree-l1.has-children { - background-image: url('../img/menu_arrow_right.gif'); - background-repeat: no-repeat; - background-position: 13px 13px; - list-style-type: none; - padding: 0px 0px 0px 0px; - vertical-align: middle; -} -.bs-docs-sidebar ul > li.toctree-l1.has-children.open { - background-image: url('../img/menu_arrow_down.gif'); -} -.bs-docs-sidebar ul > li > a { - box-sizing: border-box; - -moz-box-sizing: border-box; - width: 100%; - display: inline-block; - padding-top: 8px; - padding-bottom: 8px; - padding-left: 35px; - padding-right: 20px; - font-size: 14px; - border-bottom: 1.5px solid #595959; - line-height: 20px; -} -.bs-docs-sidebar ul > li:first-child.active > a { - border-top: 1.5px solid #595959; -} -.bs-docs-sidebar ul > li:last-child > a { - border-bottom: none; -} -.bs-docs-sidebar ul > li:last-child.active > a { - border-bottom: 1.5px solid #595959; -} -.bs-docs-sidebar ul > li.active > a { - border-right: 1.5px solid #595959; - border-left: 1.5px solid #595959; - color: #394d54; -} -.bs-docs-sidebar ul > li:hover { - background-color: #e8e8e8; -} -.bs-docs-sidebar.toctree-l3 ul { - display: inherit; - margin-left: 15px; - font-size: smaller; -} -.bs-docs-sidebar .toctree-l3 a { - border: none; - font-size: 12px; - line-height: 15px; -} -.bs-docs-sidebar ul > li > ul { - display: none; -} -.bs-docs-sidebar ul > li.current > ul { - display: inline-block; - padding-left: 0px; - width: 100%; -} -.toctree-l2.current > a { - font-weight: bold; -} -.toctree-l2.current { - border: 1.5px solid #595959; - color: #394d54; -} -/* ===================================== - Styles for the floating version widget -====================================== */ -.version-flyer { - position: fixed; - float: right; - right: 0; - bottom: 40px; - background-color: #E0E0E0; - border: 1px solid #88BABC; - padding: 5px; - font-size: larger; - max-width: 300px; -} -.version-flyer .content { - padding-right: 45px; - margin-top: 7px; - margin-left: 7px; - background-image: url('../img/container3.png'); - background-position: right center; - background-repeat: no-repeat; -} -.version-flyer .active-slug { - visibility: visible; - display: inline-block; - font-weight: bolder; -} -.version-flyer:hover .alternative { - animation-duration: 1s; - display: inline-block; -} -.version-flyer .version-note { - font-size: 16px; - color: black; -} -/* ===================================== - Styles for -====================================== */ -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink { - visibility: visible; -} -.headerlink { - font-size: smaller; - color: #666; - font-weight: bold; - float: right; - visibility: hidden; -} -h2, h3, h4, h5, h6 { - margin-top: 0.7em; -} -/* ===================================== - Miscellaneous information 
-====================================== */ -.admonition.warning, -.admonition.note, -.admonition.seealso, -.admonition.todo { - border: 3px solid black; - padding: 10px; - margin: 5px auto 10px; -} -.admonition .admonition-title { - font-size: larger; -} -.admonition.warning, -.admonition.danger { - border-color: #ac0004; -} -.admonition.note { - border-color: #cbc200; -} -.admonition.todo { - border-color: orange; -} -.admonition.seealso { - border-color: #23cb1f; -} -/* Add styles for other types of comments */ -.versionchanged, -.versionadded, -.versionmodified, -.deprecated { - font-size: larger; - font-weight: bold; -} -.versionchanged { - color: lightseagreen; -} -.versionadded { - color: mediumblue; -} -.deprecated { - color: orangered; -} diff --git a/docs/theme/docker/static/css/main.less b/docs/theme/docker/static/css/main.less deleted file mode 100644 index e248e21c08..0000000000 --- a/docs/theme/docker/static/css/main.less +++ /dev/null @@ -1,691 +0,0 @@ -// Main CSS configuration file -// by Thatcher Peskens, thatcher@dotcloud.com -// -// Please note variables.less is customized to include custom font, background-color, and link colors. - - -@import "variables.less"; - -// Variables for main.less -// ----------------------- - -@box-top-margin: 8px; -@box-padding-size: 30px; -@docker-background-color: #71AFC0; -@very-dark-sea-green: #394D54; - -// Custom colors for Docker -// -------------------------- -@gray-super-light: #F2F2F2; -@deep-red: #A30000; -@deep-blue: #1B2033; -@deep-green: #007035; -@link-blue: #213B8F; - - -.debug { - border: 2px dotted red !important; - box-sizing: border-box; - -moz-box-sizing: border-box; -} - - -// Other custom colors for Docker -// -------------------------- - -// ** are defined in sources/less/variables ** -//@import "bootstrap/variables.less"; - - -// Styles generic for each and every page -// ----------------------------------- // ----------------------------------- - - -// moving body down to make place for fixed navigation -body { - min-width: 940px; - font-family: @font-family-base; - -} - - -p a { - text-decoration: underline; - - &.btn { - text-decoration: none; - } - -} - -.brand.logo a { - text-decoration: none; -} - -// Styles for top navigation -// ---------------------------------- -.navbar .navbar-inner { - padding-left: 0px; - padding-right: 0px; -} - -.navbar .nav { - li a { - padding: ((@navbar-height - @line-height-base) / 2) 17px ((@navbar-height - @line-height-base) / 2); - color: #777777; - text-decoration: none; - text-shadow: 0 1px 0 #f2f2f2; - } -} - - -.navbar .nav > li { - float: left; -} - -.nav-underline { - height: 6px; - background-color: @docker-background-color; -} - -.nav-login { - li { - a { - color: white; - padding: 10px 15px 10px; - } - } -} - - -.navbar .brand { - margin-left: 0px; - float: left; - display: block; -} - -.navbar-inner { - min-height: 70px; - padding-left: 20px; - padding-right: 20px; - background-color: #ededed; - background-image: -moz-linear-gradient(top, #f2f2f2, #e5e5e5); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#f2f2f2), to(#e5e5e5)); - background-image: -webkit-linear-gradient(top, #f2f2f2, #e5e5e5); - background-image: -o-linear-gradient(top, #f2f2f2, #e5e5e5); - background-image: linear-gradient(to bottom, #f2f2f2, #e5e5e5); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2f2f2', endColorstr='#ffe5e5e5', GradientType=0); - border: 1px solid #c7c7c7; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; 
- border-radius: 4px; - -webkit-box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065); - -moz-box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065); - box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065); -} - -.brand-logo a { - color: white; - img { - width: auto; - } -} - -.logo { -// background-color: #A30000; -// color: white; -} - -.inline-icon { - margin-bottom: 6px; -} - -// Bootstrap elements -// ---------------------------------- - -.row { - margin-top: 15px; - margin-bottom: 15px; -} - -.container { - // background-color: green; -} - -// Styles on blocks of content -// ---------------------------------- - -// everything which is a block should have box-sizing: border-box; - -div[class*='span'] -{ - -moz-box-sizing: border-box; - box-sizing: border-box; -} - - -// Box for making white with a border, and some nice spacings -.box { - padding: @box-padding-size; - background-color: white; - margin-top: @box-top-margin; -} - -.paper { - background-color: white; - padding-top: 30px; - padding-bottom: 30px; -} - -.copy-headline { - margin-top: 0px; -// border-bottom: 1.2px solid @veryDarkSeaGreen; -} - -.box { - h1, h2, h3, h4 { - margin-top: -5px; - } -} - -.nested { - padding: @box-padding-size; -} - -.box.div { - padding: @box-padding-size; -} - -span.read-more { - margin-left: 15px; - white-space: nowrap; -} - - -// set a top margin of @box-top-margin + 8 px to make it show a margin -//instead of the div being flush against the side. Typically only -// required for a stacked div in a column, w.o. using row. -.forcetopalign { - margin-top: 15px !important; -} -.forcetopmargin { - margin-top: 23px !important; -} -.forceleftalign { - margin-left: 15px !important; -} -.forceleftmargin { - margin-left: 21px !important; -} - - -// simple text aligns -.textcenter { - text-align: center; -} - -.textright { - text-align: right; -} - -.textsmaller { - font-size: @font-size-small; -} - -.modal-backdrop { - opacity: 0.4; -} - - -/* generic page copy styles */ - -.copy-headline h1 { - font-size: 21px; -} - - -/* ======================= - Sticky footer -======================= */ - -@sticky-footer-height: 280px; - -html, -body { - height: 100%; - /* The html and body elements cannot have any padding or margin. 
*/ -} - -/* Wrapper for page content to push down footer */ -#wrap { - min-height: 100%; - height: auto !important; - height: 100%; - /* Negative indent footer by it's height */ - margin: 0 auto -@sticky-footer-height; -} - -/* Set the fixed height of the footer here */ -#push-the-footer, -#footer { - height: @sticky-footer-height; -} - -#footer { -// margin-bottom: -60px; -// margin-top: 160px; -} - -.main-row { - padding-top: @navbar-height; -} - - -// Styles on the footer -// ---------------------------------- - -// -#footer .footer { - margin-top: 160px; - .ligaturesymbols { - font-size: 30px; - color: black; - a { - color: black; - } - } - - .footerlist { - h3, h4 { - /* correct the top alignment */ - margin-top: 0px; - } - } - -} - -.footer-landscape-image { - position: absolute: - bottom: 0; - margin-bottom: 0; - background-image: url('https://www.docker.io/static/img/website-footer_clean.svg'); - background-repeat: repeat-x; - height: @sticky-footer-height; -} - -.main-row { - margin-top: 40px; -} - -.sidebar { - width: 215px; - float: left; -} - -.main-content { - padding: 16px 18px inherit; - margin-left: 230px; /* space for sidebar */ -} - - - -/* ======================= - Social footer -======================= */ - -.social { - margin-left: 0px; - margin-top: 15px; -} - -.social { - .twitter, .github, .googleplus, .facebook, .slideshare, .linkedin, .flickr, .youtube, .reddit { - background: url("../img/social/docker_social_logos.png") no-repeat transparent; - display: inline-block; - height: 32px; - overflow: hidden; - text-indent: 9999px; - width: 32px; - margin-right: 5px; - } -} - -.social :hover { - -webkit-transform: rotate(-10deg); - -moz-transform: rotate(-10deg); - -o-transform: rotate(-10deg); - -ms-transform: rotate(-10deg); - transform: rotate(-10deg); -} - -.social .twitter { - background-position: -160px 0px; -} - -.social .reddit { - background-position: -256px 0px; -} - -.social .github { - background-position: -64px 0px; -} - -.social .googleplus { - background-position: -96px 0px; -} - -.social .facebook { - background-position: -0px 0px; -} - -.social .slideshare { - background-position: -128px 0px; -} - -.social .youtube { - background-position: -192px 0px; -} - -.social .flickr { - background-position: -32px 0px; -} - -.social .linkedin { - background-position: -224px 0px; -} - - - -// Styles on the forms -// ---------------------------------- - -form table { - th { - vertical-align: top; - text-align: right; - white-space: nowrap; - } -} - -form { - .labeldd label { - font-weight: bold; - } - - .helptext { - font-size: @font-size-small; - margin-top: -4px; - margin-bottom: 10px; - } - - .fielddd input { - width: 250px; - } - - .error { - color: @deep-red; - } - - [type=submit] { -// margin-top: -8px; - } -} - -div.alert.alert-block { - margin-bottom: 15px; -} - -/* ======================= ======================= - Documentation -========================= ========================= */ - - -/* ======================= - Styles for the sidebar -========================= */ - - -@sidebar-navigation-border: 1.5px solid #595959; -@sidebar-navigation-width: 225px; - - -.page-title { - // border-bottom: 1px solid #bbbbbb; - background-color: white; - border: 1px solid transparent; - text-align: center; - width: 100%; - h4 { - font-size: 20px; - } -} - -.bs-docs-sidebar { - padding-left: 5px; - max-width: 100%; - box-sizing: border-box; - -moz-box-sizing: border-box; - margin-top: 18px; - - ul { - list-style: none; - margin-left: 0px; - } - - .toctree-l2 > ul { - 
width: 100%; - } - - ul > li { - &.toctree-l1.has-children { - background-image: url('../img/menu_arrow_right.gif'); - background-repeat: no-repeat; - background-position: 13px 13px; - list-style-type: none; - // margin-left: px; - padding: 0px 0px 0px 0px; - vertical-align: middle; - - &.open { - background-image: url('../img/menu_arrow_down.gif'); - } - } - - & > a { - box-sizing: border-box; - -moz-box-sizing: border-box; - width: 100%; - display:inline-block; - padding-top: 8px; - padding-bottom: 8px; - padding-left: 35px; - padding-right: 20px; - font-size: @font-size-base; - border-bottom: @sidebar-navigation-border; - line-height: 20px; - } - - &:first-child.active > a { - border-top: @sidebar-navigation-border; - } - - &:last-child > a { - border-bottom: none; - } - - &:last-child.active > a { - border-bottom: @sidebar-navigation-border; - } - - &.active > a { - border-right: @sidebar-navigation-border; - border-left: @sidebar-navigation-border; - color: @very-dark-sea-green; - } - - &:hover { - background-color: #e8e8e8; - } - } - - &.toctree-l3 ul { - display: inherit; - - margin-left: 15px; - font-size: smaller; - } - - .toctree-l3 a { - border: none; - font-size: 12px; - line-height: 15px; - } - - ul > li > ul { - display: none; - } - - ul > li.current > ul { - display: inline-block; - padding-left: 0px; - width: 100%; - } -} - -.toctree-l2 { - &.current > a { - font-weight: bold; - } - &.current { - border: 1.5px solid #595959; - color: #394d54; - } -} - - -/* ===================================== - Styles for the floating version widget -====================================== */ - -.version-flyer { - position: fixed; - float: right; - right: 0; - bottom: 40px; - background-color: #E0E0E0; - border: 1px solid #88BABC; - padding: 5px; - font-size: larger; - max-width: 300px; - - .content { - padding-right: 45px; - margin-top: 7px; - margin-left: 7px; - background-image: url('../img/container3.png'); - background-position: right center; - background-repeat: no-repeat; - } - - .alternative { - } - - .active-slug { - visibility: visible; - display: inline-block; - font-weight: bolder; - } - - &:hover .alternative { - animation-duration: 1s; - display: inline-block; - } - - .version-note { - font-size: 16px; - color: black; - } - -} - -/* ===================================== - Styles for -====================================== */ - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink { - visibility: visible; -} - -.headerlink { - font-size: smaller; - color: #666; - font-weight: bold; - float: right; - visibility: hidden; -} - -h2, h3, h4, h5, h6 { - margin-top: 0.7em; -} - -/* ===================================== - Miscellaneous information -====================================== */ - -.admonition { - &.warning, &.note, &.seealso, &.todo { - border: 3px solid black; - padding: 10px; - margin: 5px auto 10px; - } - - .admonition-title { - font-size: larger; - } - - &.warning, &.danger { - border-color: #ac0004; - } - - &.note { - border-color: #cbc200; - } - - &.todo { - border-color: orange; - } - - &.seealso { - border-color: #23cb1f; - } - -} - -/* Add styles for other types of comments */ - -.versionchanged, -.versionadded, -.versionmodified, -.deprecated { - font-size: larger; - font-weight: bold; -} - -.versionchanged { - color: lightseagreen; -} - -.versionadded { - color: mediumblue; -} - -.deprecated { - color: orangered; -} diff --git 
a/docs/theme/docker/static/css/variables.css b/docs/theme/docker/static/css/variables.css deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docs/theme/docker/static/css/variables.less b/docs/theme/docker/static/css/variables.less deleted file mode 100644 index cc1d764364..0000000000 --- a/docs/theme/docker/static/css/variables.less +++ /dev/null @@ -1,622 +0,0 @@ -// -// Variables -// -------------------------------------------------- - - -// Global values -// -------------------------------------------------- - - -// Grays -// ------------------------- - -@gray-darker: lighten(#000, 13.5%); // #222 -@gray-dark: lighten(#000, 20%); // #333 -@gray: lighten(#000, 33.5%); // #555 -@gray-light: lighten(#000, 60%); // #999 -@gray-lighter: lighten(#000, 93.5%); // #eee - -// Brand colors -// ------------------------- - -@brand-primary: #428bca; -@brand-success: #5cb85c; -@brand-warning: #f0ad4e; -@brand-danger: #d9534f; -@brand-info: #5bc0de; - -// Scaffolding -// ------------------------- - -@body-bg: #fff; -@text-color: @gray-dark; - -// Links -// ------------------------- - -@link-color: @brand-primary; -@link-hover-color: darken(@link-color, 15%); - -// Typography -// ------------------------- - -@font-family-sans-serif: "Cabin", "Helvetica Neue", Helvetica, Arial, sans-serif; -@font-family-serif: Georgia, "Times New Roman", Times, serif; -@font-family-monospace: Monaco, Menlo, Consolas, "Courier New", monospace; -@font-family-base: @font-family-sans-serif; - -@font-size-base: 14px; -@font-size-large: ceil(@font-size-base * 1.25); // ~18px -@font-size-small: ceil(@font-size-base * 0.85); // ~12px - -@line-height-base: 1.428571429; // 20/14 -@line-height-computed: floor(@font-size-base * @line-height-base); // ~20px - -@headings-font-family: @font-family-base; -@headings-font-weight: 500; -@headings-line-height: 1.1; - -// Iconography -// ------------------------- - -@icon-font-path: "../fonts/"; -@icon-font-name: "glyphicons-halflings-regular"; - - -// Components -// ------------------------- -// Based on 14px font-size and 1.428 line-height (~20px to start) - -@padding-base-vertical: 6px; -@padding-base-horizontal: 12px; - -@padding-large-vertical: 10px; -@padding-large-horizontal: 16px; - -@padding-small-vertical: 5px; -@padding-small-horizontal: 10px; - -@line-height-large: 1.33; -@line-height-small: 1.5; - -@border-radius-base: 4px; -@border-radius-large: 6px; -@border-radius-small: 3px; - -@component-active-bg: @brand-primary; - -@caret-width-base: 4px; -@caret-width-large: 5px; - -// Tables -// ------------------------- - -@table-cell-padding: 8px; -@table-condensed-cell-padding: 5px; - -@table-bg: transparent; // overall background-color -@table-bg-accent: #f9f9f9; // for striping -@table-bg-hover: #f5f5f5; -@table-bg-active: @table-bg-hover; - -@table-border-color: #ddd; // table and cell border - - -// Buttons -// ------------------------- - -@btn-font-weight: normal; - -@btn-default-color: #333; -@btn-default-bg: #fff; -@btn-default-border: #ccc; - -@btn-primary-color: #fff; -@btn-primary-bg: @brand-primary; -@btn-primary-border: darken(@btn-primary-bg, 5%); - -@btn-success-color: #fff; -@btn-success-bg: @brand-success; -@btn-success-border: darken(@btn-success-bg, 5%); - -@btn-warning-color: #fff; -@btn-warning-bg: @brand-warning; -@btn-warning-border: darken(@btn-warning-bg, 5%); - -@btn-danger-color: #fff; -@btn-danger-bg: @brand-danger; -@btn-danger-border: darken(@btn-danger-bg, 5%); - -@btn-info-color: #fff; -@btn-info-bg: @brand-info; -@btn-info-border: 
darken(@btn-info-bg, 5%); - -@btn-link-disabled-color: @gray-light; - - -// Forms -// ------------------------- - -@input-bg: #fff; -@input-bg-disabled: @gray-lighter; - -@input-color: @gray; -@input-border: #ccc; -@input-border-radius: @border-radius-base; -@input-border-focus: #66afe9; - -@input-color-placeholder: @gray-light; - -@input-height-base: (@line-height-computed + (@padding-base-vertical * 2) + 2); -@input-height-large: (floor(@font-size-large * @line-height-large) + (@padding-large-vertical * 2) + 2); -@input-height-small: (floor(@font-size-small * @line-height-small) + (@padding-small-vertical * 2) + 2); - -@legend-color: @gray-dark; -@legend-border-color: #e5e5e5; - -@input-group-addon-bg: @gray-lighter; -@input-group-addon-border-color: @input-border; - - -// Dropdowns -// ------------------------- - -@dropdown-bg: #fff; -@dropdown-border: rgba(0,0,0,.15); -@dropdown-fallback-border: #ccc; -@dropdown-divider-bg: #e5e5e5; - -@dropdown-link-active-color: #fff; -@dropdown-link-active-bg: @component-active-bg; - -@dropdown-link-color: @gray-dark; -@dropdown-link-hover-color: #fff; -@dropdown-link-hover-bg: @dropdown-link-active-bg; - -@dropdown-link-disabled-color: @gray-light; - -@dropdown-header-color: @gray-light; - -@dropdown-caret-color: #000; - - -// COMPONENT VARIABLES -// -------------------------------------------------- - - -// Z-index master list -// ------------------------- -// Used for a bird's eye view of components dependent on the z-axis -// Try to avoid customizing these :) - -@zindex-navbar: 1000; -@zindex-dropdown: 1000; -@zindex-popover: 1010; -@zindex-tooltip: 1030; -@zindex-navbar-fixed: 1030; -@zindex-modal-background: 1040; -@zindex-modal: 1050; - -// Media queries breakpoints -// -------------------------------------------------- - -// Extra small screen / phone -@screen-xs: 480px; -@screen-phone: @screen-xs; - -// Small screen / tablet -@screen-sm: 768px; -@screen-tablet: @screen-sm; - -// Medium screen / desktop -@screen-md: 992px; -@screen-desktop: @screen-md; - -// Large screen / wide desktop -@screen-lg: 1600px; -@screen-lg-desktop: @screen-lg; - -// So media queries don't overlap when required, provide a maximum -@screen-xs-max: (@screen-sm - 1); -@screen-sm-max: (@screen-md - 1); -@screen-md-max: (@screen-lg - 1); - - -// Grid system -// -------------------------------------------------- - -// Number of columns in the grid system -@grid-columns: 12; -// Padding, to be divided by two and applied to the left and right of all columns -@grid-gutter-width: 30px; -// Point at which the navbar stops collapsing -@grid-float-breakpoint: @screen-desktop; - - -// Navbar -// ------------------------- - - -// Basics of a navbar -@navbar-height: 50px; -@navbar-margin-bottom: @line-height-computed; -@navbar-default-color: #777; -@navbar-default-bg: #f8f8f8; -@navbar-default-border: darken(@navbar-default-bg, 6.5%); -@navbar-border-radius: @border-radius-base; -@navbar-padding-horizontal: floor(@grid-gutter-width / 2); -@navbar-padding-vertical: ((@navbar-height - @line-height-computed) / 2); - -// Navbar links -@navbar-default-link-color: #777; -@navbar-default-link-hover-color: #333; -@navbar-default-link-hover-bg: transparent; -@navbar-default-link-active-color: #555; -@navbar-default-link-active-bg: darken(@navbar-default-bg, 6.5%); -@navbar-default-link-disabled-color: #ccc; -@navbar-default-link-disabled-bg: transparent; - -// Navbar brand label -@navbar-default-brand-color: @navbar-default-link-color; -@navbar-default-brand-hover-color: 
darken(@navbar-default-link-color, 10%); -@navbar-default-brand-hover-bg: transparent; - -// Navbar toggle -@navbar-default-toggle-hover-bg: #ddd; -@navbar-default-toggle-icon-bar-bg: #ccc; -@navbar-default-toggle-border-color: #ddd; - - -// Inverted navbar -// -// Reset inverted navbar basics -@navbar-inverse-color: @gray-light; -@navbar-inverse-bg: #222; -@navbar-inverse-border: darken(@navbar-inverse-bg, 10%); - -// Inverted navbar links -@navbar-inverse-link-color: @gray-light; -@navbar-inverse-link-hover-color: #fff; -@navbar-inverse-link-hover-bg: transparent; -@navbar-inverse-link-active-color: @navbar-inverse-link-hover-color; -@navbar-inverse-link-active-bg: darken(@navbar-inverse-bg, 10%); -@navbar-inverse-link-disabled-color: #444; -@navbar-inverse-link-disabled-bg: transparent; - -// Inverted navbar brand label -@navbar-inverse-brand-color: @navbar-inverse-link-color; -@navbar-inverse-brand-hover-color: #fff; -@navbar-inverse-brand-hover-bg: transparent; - -// Inverted navbar search -// Normal navbar needs no special styles or vars -@navbar-inverse-search-bg: lighten(@navbar-inverse-bg, 25%); -@navbar-inverse-search-bg-focus: #fff; -@navbar-inverse-search-border: @navbar-inverse-bg; -@navbar-inverse-search-placeholder-color: #ccc; - -// Inverted navbar toggle -@navbar-inverse-toggle-hover-bg: #333; -@navbar-inverse-toggle-icon-bar-bg: #fff; -@navbar-inverse-toggle-border-color: #333; - - -// Navs -// ------------------------- - -@nav-link-padding: 10px 15px; -@nav-link-hover-bg: @gray-lighter; - -@nav-disabled-link-color: @gray-light; -@nav-disabled-link-hover-color: @gray-light; - -@nav-open-link-hover-color: #fff; -@nav-open-caret-border-color: #fff; - -// Tabs -@nav-tabs-border-color: #ddd; - -@nav-tabs-link-hover-border-color: @gray-lighter; - -@nav-tabs-active-link-hover-bg: @body-bg; -@nav-tabs-active-link-hover-color: @gray; -@nav-tabs-active-link-hover-border-color: #ddd; - -@nav-tabs-justified-link-border-color: #ddd; -@nav-tabs-justified-active-link-border-color: @body-bg; - -// Pills -@nav-pills-active-link-hover-bg: @component-active-bg; -@nav-pills-active-link-hover-color: #fff; - - -// Pagination -// ------------------------- - -@pagination-bg: #fff; -@pagination-border: #ddd; - -@pagination-hover-bg: @gray-lighter; - -@pagination-active-bg: @brand-primary; -@pagination-active-color: #fff; - -@pagination-disabled-color: @gray-light; - - -// Pager -// ------------------------- - -@pager-border-radius: 15px; -@pager-disabled-color: @gray-light; - - -// Jumbotron -// ------------------------- - -@jumbotron-padding: 30px; -@jumbotron-color: inherit; -@jumbotron-bg: @gray-lighter; - -@jumbotron-heading-color: inherit; - - -// Form states and alerts -// ------------------------- - -@state-warning-text: #c09853; -@state-warning-bg: #fcf8e3; -@state-warning-border: darken(spin(@state-warning-bg, -10), 3%); - -@state-danger-text: #b94a48; -@state-danger-bg: #f2dede; -@state-danger-border: darken(spin(@state-danger-bg, -10), 3%); - -@state-success-text: #468847; -@state-success-bg: #dff0d8; -@state-success-border: darken(spin(@state-success-bg, -10), 5%); - -@state-info-text: #3a87ad; -@state-info-bg: #d9edf7; -@state-info-border: darken(spin(@state-info-bg, -10), 7%); - - -// Tooltips -// ------------------------- -@tooltip-max-width: 200px; -@tooltip-color: #fff; -@tooltip-bg: #000; - -@tooltip-arrow-width: 5px; -@tooltip-arrow-color: @tooltip-bg; - - -// Popovers -// ------------------------- -@popover-bg: #fff; -@popover-max-width: 276px; -@popover-border-color: 
rgba(0,0,0,.2); -@popover-fallback-border-color: #ccc; - -@popover-title-bg: darken(@popover-bg, 3%); - -@popover-arrow-width: 10px; -@popover-arrow-color: #fff; - -@popover-arrow-outer-width: (@popover-arrow-width + 1); -@popover-arrow-outer-color: rgba(0,0,0,.25); -@popover-arrow-outer-fallback-color: #999; - - -// Labels -// ------------------------- - -@label-default-bg: @gray-light; -@label-primary-bg: @brand-primary; -@label-success-bg: @brand-success; -@label-info-bg: @brand-info; -@label-warning-bg: @brand-warning; -@label-danger-bg: @brand-danger; - -@label-color: #fff; -@label-link-hover-color: #fff; - - -// Modals -// ------------------------- -@modal-inner-padding: 20px; - -@modal-title-padding: 15px; -@modal-title-line-height: @line-height-base; - -@modal-content-bg: #fff; -@modal-content-border-color: rgba(0,0,0,.2); -@modal-content-fallback-border-color: #999; - -@modal-backdrop-bg: #000; -@modal-header-border-color: #e5e5e5; -@modal-footer-border-color: @modal-header-border-color; - - -// Alerts -// ------------------------- -@alert-padding: 15px; -@alert-border-radius: @border-radius-base; -@alert-link-font-weight: bold; - -@alert-success-bg: @state-success-bg; -@alert-success-text: @state-success-text; -@alert-success-border: @state-success-border; - -@alert-info-bg: @state-info-bg; -@alert-info-text: @state-info-text; -@alert-info-border: @state-info-border; - -@alert-warning-bg: @state-warning-bg; -@alert-warning-text: @state-warning-text; -@alert-warning-border: @state-warning-border; - -@alert-danger-bg: @state-danger-bg; -@alert-danger-text: @state-danger-text; -@alert-danger-border: @state-danger-border; - - -// Progress bars -// ------------------------- -@progress-bg: #f5f5f5; -@progress-bar-color: #fff; - -@progress-bar-bg: @brand-primary; -@progress-bar-success-bg: @brand-success; -@progress-bar-warning-bg: @brand-warning; -@progress-bar-danger-bg: @brand-danger; -@progress-bar-info-bg: @brand-info; - - -// List group -// ------------------------- -@list-group-bg: #fff; -@list-group-border: #ddd; -@list-group-border-radius: @border-radius-base; - -@list-group-hover-bg: #f5f5f5; -@list-group-active-color: #fff; -@list-group-active-bg: @component-active-bg; -@list-group-active-border: @list-group-active-bg; - -@list-group-link-color: #555; -@list-group-link-heading-color: #333; - - -// Panels -// ------------------------- -@panel-bg: #fff; -@panel-inner-border: #ddd; -@panel-border-radius: @border-radius-base; -@panel-footer-bg: #f5f5f5; - -@panel-default-text: @gray-dark; -@panel-default-border: #ddd; -@panel-default-heading-bg: #f5f5f5; - -@panel-primary-text: #fff; -@panel-primary-border: @brand-primary; -@panel-primary-heading-bg: @brand-primary; - -@panel-success-text: @state-success-text; -@panel-success-border: @state-success-border; -@panel-success-heading-bg: @state-success-bg; - -@panel-warning-text: @state-warning-text; -@panel-warning-border: @state-warning-border; -@panel-warning-heading-bg: @state-warning-bg; - -@panel-danger-text: @state-danger-text; -@panel-danger-border: @state-danger-border; -@panel-danger-heading-bg: @state-danger-bg; - -@panel-info-text: @state-info-text; -@panel-info-border: @state-info-border; -@panel-info-heading-bg: @state-info-bg; - - -// Thumbnails -// ------------------------- -@thumbnail-padding: 4px; -@thumbnail-bg: @body-bg; -@thumbnail-border: #ddd; -@thumbnail-border-radius: @border-radius-base; - -@thumbnail-caption-color: @text-color; -@thumbnail-caption-padding: 9px; - - -// Wells -// ------------------------- 
-@well-bg: #f5f5f5; - - -// Badges -// ------------------------- -@badge-color: #fff; -@badge-link-hover-color: #fff; -@badge-bg: @gray-light; - -@badge-active-color: @link-color; -@badge-active-bg: #fff; - -@badge-font-weight: bold; -@badge-line-height: 1; -@badge-border-radius: 10px; - - -// Breadcrumbs -// ------------------------- -@breadcrumb-bg: #f5f5f5; -@breadcrumb-color: #ccc; -@breadcrumb-active-color: @gray-light; - - -// Carousel -// ------------------------ - -@carousel-text-shadow: 0 1px 2px rgba(0,0,0,.6); - -@carousel-control-color: #fff; -@carousel-control-width: 15%; -@carousel-control-opacity: .5; -@carousel-control-font-size: 20px; - -@carousel-indicator-active-bg: #fff; -@carousel-indicator-border-color: #fff; - -@carousel-caption-color: #fff; - - -// Close -// ------------------------ -@close-color: #000; -@close-font-weight: bold; -@close-text-shadow: 0 1px 0 #fff; - - -// Code -// ------------------------ -@code-color: #c7254e; -@code-bg: #f9f2f4; - -@pre-bg: #f5f5f5; -@pre-color: @gray-dark; -@pre-border-color: #ccc; -@pre-scrollable-max-height: 340px; - -// Type -// ------------------------ -@text-muted: @gray-light; -@abbr-border-color: @gray-light; -@headings-small-color: @gray-light; -@blockquote-small-color: @gray-light; -@blockquote-border-color: @gray-lighter; -@page-header-border-color: @gray-lighter; - -// Miscellaneous -// ------------------------- - -// Hr border color -@hr-border: @gray-lighter; - -// Horizontal forms & lists -@component-offset-horizontal: 180px; - - -// Container sizes -// -------------------------------------------------- - -// Small screen / tablet -@container-tablet: ((720px + @grid-gutter-width)); - -// Medium screen / desktop -@container-desktop: ((940px + @grid-gutter-width)); - -// Large screen / wide desktop -@container-lg-desktop: ((1140px + @grid-gutter-width)); diff --git a/docs/theme/docker/static/favicon.png b/docs/theme/docker/static/favicon.png deleted file mode 100644 index ee01a5ee8a9ea542f17123ed9c587510a2e26aef..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1475 zcmV;!1w8tRP)+(fofHYbct z7*K_dWFwmr35jN}Fd7pRjiT8b<4YG~_HK!0x)@EIj7*}LWu##SsR%e!jR>VYCg?1bo3s&Wq=Fp7Z;D&i6dO=Q)oO5$YSi5ceL*k{DJLkk$3kJZT{rY7j31U11z*grKSiAPVO#SF|u>ah6@nZo* znTD^7uUP$^M8L03ldH=i=`1QNGc`F*ICULuG|ln+1|;`C`=jEjri~lctbC@g|HP$_ zj?Y;I)}+EjX{>WtZLr$lY=YAo=LcY8M8J6|H&>1gO-+qaN7s(2ZQoio)wT6!yOi?h zbzuGGXF9dmywX;@Tzk7Xhtmde4kdzk0V=X!6P%wyDTRqD3pQ@qvJ!l(qkOP;-{8L! z78bPFTyt}r&JlKP+I;58zV$y;>Muo*pNLd$0VuXDeyP~;>21zh(&duM8y}ZSV{A;R zR3xz$6PC$pL#8f=PGYL9_ia75JvK>4YqgJ?qm56&%Auo7T6%8qx!s#)Ac%PXjIw{&`Lv)Au$G*L?lWfnJflDdp=P^ zlS)z`5_l{pFGM=jERQC%@F06h26GQ8Fd~&kzz?Z$S)Z}g7 zzTM*#7IPmD3)rAQ?a!SxZEhE85-A2|4fgT1|fRF6Hk^V`?%|LiwY>8$w3 z!YGri6TnL%w4XwZhl(Q-XK}_uJBtTMkU}|!3o9gk3I+JpQm93Z2%-tn4YF`)<3w90 zm%}Q{_P=@}J302IUoHKkb@*i8?~e`)19)PBO&3>gTDqX6Ku&vzOKRJJ0&YAj)UGXx z7-ro0PaGgoI)&C9|B6mWRYodhnlB8{e)j0@zPI0c>((kza>Rs{o}^kve1GQoQL_}) zZ8oJ)z+@D1rCLTQ#lpeA9r*fzKkc9Gb}uz~^+@LYX~ruNrF6hY+@p9X_BfGyOVrKWNI5X3W-lNpG6vQJRZh7& zu7q}WZ&WQRQH)fpU>z8r=FzhhPQUYFUvKZxxeU{9oH#Moa&_o8>9B+g6C4_+J)F`= z9MkMV9vT^-T_*?%rEch2qm+WgBGYB0TmdKG1vE}wrt{Rnm;btN&rjzP2T&rymUZje zFD-lQkk|34&IG~p42`~~BV*{Nke6%{6Vn7+OcqvUKp! 
zkH5R;weQbm{Y?M>PdwHA_~)M9`et{}=2p)bQmGWp`38PBJjILs!zis#2v}DWwzjrf zsJ$zUArns1cH!?Mi!UDE{pWpeyf&ZkEdT(^*KB-rL(dmpdFu1sj}{geYI`J(R|dxT z<$Gf@Gt^AipcEQMU8P9V=(+Kh(KEkpzxeJ;e>n8+h1(Io1)!E+WjB6t=L-)$vg)fH zot+OaDir*DLzcHcDDkV)S1E=uei9Oti)6~zuho~Xp3F~N-nVFEaPQv3htJ=x@SFfQ zfcTw{tX|%}Wa-1Xy6mRFsCC82R;Qz6zi*@Cb=A^PS dzXxy+;Gc5Vg6*aX&Q$;a002ovPDHLkV1knE)4Biv diff --git a/docs/theme/docker/static/img/container3.png b/docs/theme/docker/static/img/container3.png deleted file mode 100644 index 0e8b59f75daf8dfc2e3860d7f4eaa021684ab247..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 263 zcmeAS@N?(olHy`uVBq!ia0vp^av;pX1|+Qw)-3{3k|nMYCBgY=CFO}lsSJ)O`AMk? zp1FzXsX?iUDV2pMQ*D5XR(iTPhE&{2dUIpzbNj;#3~FtROgbKK*d6u=bUbaeVAR^i z&>`AzgQ0`9;Rmxoe8VQ*1zCCyM@1J%EMsGlxp3vc8OM(wo4L%Vv#ofV!0<$!X`}Ka zNAU%JoK$LC12(pv$Xbx}B1?~_BA`FYChOyhqs^C+el~83NHpndZ#~NH-duI%QDC>b zu3SO46&(FECl7_>ZA$qv?x_(tj(Yzw{`td(R?}z%XsUVSGS<<{Y4-89ZJ6 KT-G@yGywpIYhvU8 diff --git a/docs/theme/docker/static/img/docker-letters-logo.gif b/docs/theme/docker/static/img/docker-letters-logo.gif deleted file mode 100644 index b394ae4eccd7f6ed90586dc152546a872b163c1b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7287 zcmV--9EjsbNk%w1VITw40QUd@9v~q~OikqEdAaB_`!h=@2jI!H-NEiW(;6BH&XDjXdi zW@u?NHaB8pWj#MYT3lUKR##nLUsYFFFfuY=Vq+vGC__X=2@4It!NG!qgV)#B@9*#R z_4Rvvea+6!IXgUGU|>^KR_*QW!^6X%prA1`Gz<<8%F4>p)6;HmZ$w2#WoKyA)YP@L zwx*`0;^X73uCDX*^Xu#D4G$32)z#|i>X(<8!o$P+`}^$d><9`A5E2t2BqjLx_@AJl zzQ4b}z`)kl*7x`Kii?Z%^z^;Gz2oHM;^N|hgM|9}`XnYN6&Dxs^76pIz{kkQ)z;P; z8yu9BmAt*Z*4NigPf&)3hebw5=H}*{ou0hByuQA^kdcwY#Kftps(^ulUtnQRP*Hby zc=`GHKR`i3LqzWG?$p)QAtNNk#>Q@NaO30SprN7P;NW(5c>ezWmX?>lzrU51mYkiP z%gfC6_V%Wyr;?MCr>CcjjEtV2pPHMS(bCe0iHVbxl%1ZQy}rKF)6;BiZES6BtE;Tn z*w`QxVX8xxw^X2($ZdFVBz86yu7`H zg@y$O2><{8EC2ui03ZX@000R80RIUbNU)&6g9sBUT*$DY!-o(fN}NcsqQ#3CGiuz( zv7^V2AVZ2ANwTELlPFWFT*({Vj%brcUw(Z-vbL-yCySMM(z=I1PPQ1ABQ!HeUU+>oN~&A0htsqhIvwUYHFt? 
zyl{p{=%7%+sjzPU>A)zAh}6XpO~k6Jm4OagsHuo9Wvj zIRF3)2r*u%(PoQf6CLc>5fPjGq3y0Fpzw}I_mnU$ti2*c=&8YGl!zuP&x zrkm!gtqCJs6CSqyYFhy^j%aiSDN`04YZH4InUNk+u<3B6?H=?lu!&}510K6>a>u=r zUaaiC&h~5avTGb;r*{Oeyle#CT4cxyGRLZd9~dPwMb49EY|zG?#(Pm7TkLv4I}YIs zw5cc~^z75RaFxQX4*ae4=#!E|Q7Nc1>dQoesMGqEkqK56a|&0%q@Rzws$^;Uvjkm%@p*+ezlG2cFy z%{=;$;q$ktI7FD>oIm&f@E)VU_mEYjVpmuog84d^KPst@Ana?PmP)4``AP6ffye_y zA^@@rYN-OBfJg_%*FFFW&`<<)2qJa`h~F*HgZ_BnKq3gkD!s3Q+N+_JxFI8ls7r@f za$!UN`X@mX{;FtRlL!o2u&Yei4|oi0pbP~SXu!v95RSk zHPDFgN+Lw=Acb8SKoJu;-~6Wdk1Fc01whD77Hc>F5IBSeTma*h22clwY=DF%6qLJ; z=r%Pzq!YX1LI@}#$M(_Djx}5Z4fL2pL5jkMg2WOc8gd35To8>Mn}`8-<&2TVF^c)S zr13)O4<>MumR1@C;TG}*V&RfX3|Q4dB;kNlDpF&Nq?{ug5`tZ60Rk1NB>H6eODa{u zZ`avl4Tn&+g~$K^)%1r8ptq1R8q-&4R3Z~~We$r-lX}#wCM5Eh%}#pVFY7PJm z3_(Cl2atxEa&&0P#N9Fx(g#`WN&yG>sX{Hu(9k(lkblrYQGa>Vq%O6o3Mr)rQHjKd z$YGtZ0t6^B0#xA%6_)@QD=y2r5T&vdtqD=ZlIC6&hDT+;xVD>(S=xIS3O`uW4AiZuCDK^f|RUvv|l&tl|o{4B`rKn3qjO^ zQMlA0?gfpD5ajMvxdvhG{Mgz^h9qDOx-virH{#gV0XMaJr6qX}VqX2Cw;<}hsX%j@ zki_Z=8WHG7eNlJc@8&m@`!z^^A^cy01Q>nnRiF3;;&FjhT|&X2p4jSzU@%xF%tn%Au6Ms#5R6CFHNkOwm4 zNde#nKWGG-_sr%Hh#(XDG*yuUdt_5pKnlU&v!WNxXc4@i7)jPJLVRFTPHiEpIM%M8 zG9U+@VA4-&@W?(k0OkiXY5-sGh!74Z=gPh|R$}k~M@ZldSPgo%h31qBWPOtp{)vEX zP=Kx~Fbt4HSxcPuw0Hj$YL7%>0k8@)bjG|TRDUD|d=j*)j|(d+oU0?_*e$1Sz1&<= ziUzyvHFQxp)gnY0tPsos^8A}ca~7@5tVDZ z;GK#&NN8?zBcDnEZP+<({}F^;Nns>KztYihlJqt$eIreOy2<}U^^RDb<=ZarskA=x znzMZ20%`c4z%F(I3nbXOih?}QF7G$3{U&Yq(%Ur>cT+*VIa0?`-8pjiap#@CdzZOK zxc>K>n~Lx`k$9gfCjwoWKocZMyOkbqlgM{z@)@aor!LQ&%nvf)RbRR1vCb*JkKXU& z2K+w+?WkE&s>M`(*r% zkUsgQ@09b^?NL3zG*%9%MM-2|rR~$C`&;sT7ZHAy!h6f%dseb}8_{{__jwW)dRvEl z=%;+l*C+F56VT@-Mw0`&G5{T568uLc{x=f9>Aw z(taHAeiImfoN|FRk%4UzFZ3{Qv7!UQ6noj%CL<^lC72~ASP>~`XLIK#FPITAI4Ls- zfqmC?g!Xk|mwG%maf#9g=b$T2&}kregkF+_F|mYL(u5T8gp9I+$0^^QP_xn{z!!Y z>5BqMDFmqz;z*3-NGawh6X?h#cmN0wBMdA5frt(R0EGk*9O(~L#$`e|lvw6vx-w6> z_$Iq(5xq!v-l%@y$dU$matJAZ3Q2!~0|hLhlLf<*8!3%2bWKU=CQDfnO(}&>NrP^( zk{QvG2lS^H~w~sf+gslM^bF6fqM&)Af0CjUv7$=ZqH+16a~Y%l;sBD6lmAIIHkvYmlu~7)qt%&mK8l?{3Uduw zkPixA5juECYMe^yFU;Vil)0g@*`ddi0lM%Vl-QXPah-vgoyFIsQu(D87N*TNrY~`v z>rw&D_?k7Urq9WysPhL{U<(QV1E(RXlssydSz4!C%6@o?n0m@^d}@Jy>JosuF5-EJ zPx>@bT09=q1Uz5?*Yc=j8L5Lgshnx219_>WnV9xDoMIY?(HRVc+B~J&P%wlCQeXp5 zlr=k=pm(fb+(xchSbAQUkkU#iQh*Fva57CAuNtbY zfT|ugSWg+F7ptx`hCHjHWUIcF+jRa;6Ads0s^K$`e1V8knx?nXuX=SIDn@ zSh0V2vET_T4?qNA6HS-88nNR#vHc2v|7w*1`#rJ-3d^Dc9>#b% zyQaTX0j+>TK5KsZs&_2Qoi6L0FpHIK1q`U5EU&kJqY5;?>Q&WrMYm~%Cp)Jp%Oxu- zv@J`t7HhN^8)Hgatr_RE2|KU9B$8`;YHb@4bQ-J5x|YmJpUzsO(8{St8(jjy2&$+o z@J4iHn=@y7T1NGkfqQa;3lW8@va+g@iL0WEOO{58nvctF1JMnY)+tP&f0p~ShZ;^| z*|~K0xepP#a67ubS-L}dx`COx61uuQHxQD$EJMJzAnUh(y9b-8vbyUKyjyL)E1$qS z5k!i*jhn8W+7h4tdL`0FeXv4ovui4lU)1>U|%f2SpHV(F?cJ+Yr?&E!T^p*^50Itg;*&tsX3C1krkW zTf(M-z$~G_AOR1s@&u6}!H`P9nP|aYg26a!uFHq6ZlbQ9%C1?GJA6>H?!4c;q2KAXA6yW-uq+jD3kE#Io?LsN%!HzRDvE5%;A*u%yRUMqwfxJq{tL@K zTo4=(0L#*0fNa9bb}9?(5Qyw2zdT$e%b79E5Hmb=H5{ZjEXO$vw>#{(JuFoTL21gu z1=0}BxIE3KQq2u<&3%&1;kC`u+sG*S$fO&|<2cFqS;?z+Sg%k8%R&Zw>s#?$Z@b)v zy-doB%+37FoBsT}0PT_j?Vpa>%iQzBY}XAPjd}eMne8T< z?Qfl}d7pg{f)D_&+s{GEa^c_+qrBQy>?U{s3ypoe3}Fqby(?s}p6*21U9E&)-I03# zJt@5%b-#@z*iZu&$p`q51fp$gPfd5!-~)s}AV9(gmw*JUhS+u7CtL6glVFh~asrz( zU;w;f%i>cc_0tzXk^bEz7@(0-I?vzDDB+EH<9$4Fn_T%lIs?!Ek*3ZKLEStJN7)TS z7mxrq3gNoF+(tdt`Ak`FoIm8|%Hzf10v^i?!4AdkHV01PCjQ`^LeC7b+if!8y;tGF zbK&Qe;hwYM9d6YQ@d{T!%Of?}JbvOo9^r;U86U}n62VsJ<&kc-mP`&q@L=W)#nY-3TF%}Z}dupUh7w$;=Y{dNWJLC zo#azZJ$oMPtL_lZv@B+T?3v!<%5Lb)F44eDR=i$ZzOLcGep|xc5M#ho%2EMA5bMbf zthU|}pUx$rK8d5Q;pIMF=f33W{#;|{5FoWIB7^A{MCLbz&F)-#r;GzOk(k zkW}d?pAc$pG9s_JBoB8c58gZP+u=k}3Sm*zgi%f<^O5e@6H!pmGB=zv?<3Ff_CE9y 
zUi9L9^p}nh`V>z6^iTUPGu5u{*A5Zl!_yhnG_npfKi^%s-SFa0*44yK4Dn6jG)@d* zPEJ4O9!?Q|4lUGB_1)g)oPOxo%=Hu=PP%lS#V$y}#1O;;>45L#6#)Zgv@0zT?<8aO zK#%n$@9^V}OX=VcVOveKKuXOG@J}z@6|pp4g48ae& z|2Q)c;0&UM~G!iBqUkVC(jXS+uIde-t~ z%$YS~OuN`KN*Np<6!}~_p*D^%xmQEc)6v@_<)vrD>L|xabhWJ|zK$9@EaANrdR0t}eeUjwy&?Y|NSWbr`0?cz zPi|guAVVJs=(SJxQH_rp8L7XWw2O$lz>E@00A~2w;tq<+BXFv!czbNA$jkz6DB?}vQmeJrGEasWO?INE&(Wo zhx{}P1BhdepmI-k-JJ>Etu*0_0z~XR7T2FS{6wt z;-r(Y%@#TWQ4TqagI+{%yXtA4iO3N}Ou74Sz>yxMnT#j80|~nW_d*1le$#|%#eZl< zXUZ$L{Bq1Q*L-u%JNNu^&_fq}bka*V{dCk*SABKXTX+3+*khM{cG_#V{dU}Q*L`>1 zd-wf!;DZ-_c;bsU{&?h*SAKcsn|JeDcdT z|9te*SATu>+jswc_~Vyv2R-;)K@f&egd-$j2~BuH6sAyxD`a5{UHC#6#!!Yc zq+tzhctafKP=`C@VGn)yLm&oGh(jb|5si36BqmXbOJrgbo%lp3Mp24Wq+%7VcttE` RQHxvTVi&#m#UTa+06X*r`HTPn diff --git a/docs/theme/docker/static/img/docker-top-logo.png b/docs/theme/docker/static/img/docker-top-logo.png deleted file mode 100644 index 4955f499bdc39daa24c3e6b779283e113eeaf1ce..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3426 zcmZwKS2!CA8wc>%B|>qG7*%bJlA5hmdn>hx)EI=yLvmrZ|ZwGxxvlgPOc%y4!F{vWomsbb&KH1&1?q~i!r{o z$o3|2k+NJZ^#Sdy4E0;Z-vSK>arcLQ$_O#Ya}vKBHSR$Tucvx+#WJ z3>6r8_J7IzIrJnwF(LEAg0r(I{rW9Y(a3TFWeq`GxuL6iV}7%`CI|ypzX*5p+V(vn z`t{D7DKwm&UG!E9xge1&dS43ccve}HulcjastKq37c8rHSYH|4`WLe&a4B<<+qYeM z9)>RUpxvBbY}KDPD#%Uux*5bmq<$%>Hxy4??d=vuO$40^gcUkOWqT>FJp=GjhGj`! z*@9h_xha8Dh+=z`Qgk@IKkM4s&VUR`CZ!JU8^3)c7G?VXQ<8F+_(Jx-L~ZfL=5Th- zvQE%-nN4B4__Ov|nW#g*TJjTJy%9FUoC-tevZoS>+af>}Ptujw#19DZygp4MF&o!; z=vC$INI1I&q*R7;Pb^03vX7Xy1TiJK7>HROft?cRQ z=H_#0PaoN(Yr7j=kUz=2+}ce>H_kL321L`oy1d`<0D4->q+nQg=u#HXG*V7s(pM7d z#_gOY;}MkVxm8lPzp>WjgDFk(e46kihg?9c$O%A3$6<6UFQGF{c7EJyzI%bTyoCq( zI`MhY$}sYsa5YpiZiaBAC0K~DIZ8@*+9PotMyCyEC{^@d2yAWlLS`sTlUx=EkWizf& zTY%@Yn-#g?M=h=5)CL%=5Ed2S?&&!V&$rnw33JfldtrUT5WlU$wPl%Ri8}h&HrLM| zxmDgxvnhm95T4zUNmMbGj*2?lXR^nPdY4EH=!?$?V@#5)pwi;pLF_L0m?973r!sdj zk5Nb^UuAJSr=-Q>oD;)zfHy`sIglz&kkH9n5D5oi@U}%S= zwEXEaT{~@U95sbJbzK3B3R6~#ny^p0$B*40)yU}B97vt!m;kV{?{17+13r07`O39? 
zD25bcn*IK1IM~BEw9oQd+Iw{Ub9;w+>bYjfN4^ZzHpT4|;hT`UO6P;fgnETkyPCJy z0pHQ0lIsn3rE9HwZxgxRbnuL{2ypyn&!IG;FJl^25rK%MZ=`p|g;N&sg`twew2Ghi z+y67N&;ax3>u{35e7}ozG06*=K9~C`*yIqCIhN90`>0JZ&?aENzs;)0QAJLc(eL4r zj;eF*m{YBMXp7bEOS*ZwfU}R-} z?7QRhKP_2v#GQOrPOtH`#5dmC`<%&KX4T;~GF`|R#j`p8>buSJiWCgnlv3_lDKQ*( z??^w;IC$1QgnyDzt$DWGfFi`TI=UXA=d2b0;yvdcX80|MwiLE2@OtJ`_4ZTVTbhT{fJKbvo}<)5w$?31wDLgk*yPU#;`I|7Y_@Dk z9bk1Zj&`J5^vwaFCdF;TK(8;_>QZFNdVQPZFE|?DKdGext@ro68J|xrJW%5WN{AyTb zQN`LGsQc%ecm z#3~yWkJvJ@ppQ*$Q#DhIqh1S23Y)?s_^XIJSIKfx08I1q9E2=o`2zA6a8wXYkmozi z^Nl=yEnpkMIr~~ESgsSA2#;;qzwS>hKdNv{CTt*f_ki>n>?`TEeY@QE zRyF3_s~w2V-tUbmh)+cL#DLQsrH=R8^_-F?|5_Q2{fQE%hImRv7zSO{lklx_5~v}yLVo&aKKcXE}xBl@);gGVQVgPF)=k+tBLCud-_F@1p44STUUwW1NFLGRL^^2LSSwCb8J!Z8Yw~&iQFBI=rYsxvZv3uh z9=AO0GkhSu(m-e6{!5cg&^$8Z6N|aE2l$ZVQ@q`7>RnEuimbc5!OP`-MIXQ;{8Q+| z_lX;4No&GQF^{X0Q^2#2uv`ta&6rDy>gwvb$g`E5wRV5shPR-y9mAKeyfzhK9Lx6` zvOwLbCvmj(t0CKo!MLl4M=9(u#BWB#{cv95jfmPO?Xushg)XE7e$k$rglsjZYV<~iv1xtJAA<4+?tc`fO91U9 zESH)jxT@wkY}IPbvSjiSykt6l6%6XsAn z0waENhrXXQu*(F^&2p&la!#SiCV%_dMG5m5y{DG2Y8ljNH=z38so>$Ej z2(g9lp?1rm4vgd3agCOAyXN1#^n9E~@kqOW+Hm)T3wD1)8~8vrModdf%W4z7*&bp; zwrLI*M>lF!Gt}ZZE+T0IxEigfGey}|&lz~-RCi$>82YyFX{ENEm=|>v3D8NVX z0|iDFrHe`xZ_4n&4WwT9x{US7IOGdeIHDfC-am!DIH=I6s7GuV4F|y3`$OK;FyByI zQam|i#?*-L$f^Yfa*UT)mHEtGTy`s+&n-N5F-R1@UO`-a|Mac~+Gpc3SUZyWuLMx& z@$1iHvpKBaFjp*UcBC}1N&LFp^CYT@oyo@QQU0>mhh;6S(rILWydqa9)9mzG3NR;0 zzTrGk$+Do6)UDr|S-BR*qeOEhOaCmpTKqknBmxors@r96Z^}}!=|D0hc!$1$lB4=j zoQa_4$rUe$dcNT>UI!JP15iHyMyNZyG=8y?RLAsM`?ZwQ)s|Zl0js9=mF|P!QK?LO z-L;4LO_OU^?pnD8?EV5=P2|C~&aa|jzu#G!z$zsc2H$dLT$3K>N4X6j3&IWAoUIB7 zzfz|gJ4CHNfHv;{jdH((S6Rs!DTG)CXvd*hnEqN|Pv>oN-ju=Z-{PRNd14O|gy8dX z_hInOJV034M7Dq`n7tUJ>B#|fc+rIvsU9uBWi)x%m)`O_7X5pvrcv4lpOzo yh7FBd{rYz(v$g({T(lmwVyxD`tc_gJZ~=-s?9joY~H99LL8hrnHkZETfcNr4@>k^V>MYG^DPZA8GOOOzG-=FvU^M1WP@4ud95#d3mCe|hh z1j01AckSItwF^#9*i9u<6LuZQ z?4W+BA99J<97UXKWL#KObp6JS0Aip81~V}+IW#oHE&j0a(JY^{KrA8tUI$4ftcGH9N-(3JMdaDc3SZH;v)%n-mQA8 ztzW3|k;G0Xu_~k~dD36)J^L&&Ko@)-Fz#O{aCWGpN@PC&^pXmKf zVe?;E2S(9bvwd^Uw)nkE!kpOeFP)Sw7?`it77y#NJ8r1_I$x=#?@vZfK^0agiRgn% zx&*ec#xtF|ebq|G%k6y!5B+pNS2;jdC9M^Vi<*M^K16vZCytb8@q+OX`Q6JWvl|pF zLEN%fVECXB@2^Yke1ikgv&Q78cUuRq)Ggr(q$UxZnhGwE`BGxZp8*HP`w zjWq;n7W=Xx4@+d9GSNgCoik)+XLDNnYy^CgRO-1*eb5}PZTk?Q%W2gLmpJu|M^}r# zgoIiL;9sEBblZ)PCq#jmK%1oQIm~M)HK@|5!;zy^fHZcY#S;U+bpT!G^Jg?kSbILU zU7eN%k2ruxOIUrgIE`wZGG!0iT7Rr3vjq6kMeRMBN$XXdF)@+ zQx|u8X-^snF5Sn=2cs4#W=z>05z|_+xICkfE05`^NTr#=d}#NudJP!nt`m1{gy#FT zu%Fdb^cTx`Sjl4%ZwF)md- zEl5KO>rKP8?0n??5|PxHu>sx~0~^5G!26mt3?qgk>=gr>OFV#Eo9qb=&h3W1NO0s! 
zc7bS*V)tqAs++UQTH-vzBUy0>(XMI^mf^x_LP}7wQ~5Ziaw~L;Hy^)pX=~b?Q2 zZ0g9mht{ANc68JhPV)g>U+Q{z2^;|)*2TtUphNd`U9t?VNpS7;#dvv4hdo);YMJ7L zE+KF3)l6ZQ`W=TOTK};9!xRB1p6cX0n&EQtD z2sQSDML0{(%AzCLGgG&$A)8Yi&mNjP`E@c;QIH?iA=OR#JOPh$ZE@4Dk=#&LgE`Qm z8fE+5Rz(P`(2fHsJ0>qD_>zw3{qE|X;vg9heLn>Y=JHt7DZPKj>b@pZ_&9XurYRWa z{~;gdvsexD!39X}@zHOc%oU=XVQ=kyZ{<})c(fm>37}3%B;?LRR}{}aD%$k>3TKCD zQ-HKdZW3Zg&va@w(A-|T+V#SqKc9V6|FgE%7#wSw-6+cW-rZR}#A!Y(l8Yao70*TGr**gl<`=Da#DJSeAOv_{N> zV%q)Z(Z`XRp_0{drZ!vj5DFw>Fo{^Gy03}y^ww^06YgxI{w({LFSbD|iTN{Q) zNZkFWuUx$2irpyk%<@vgqg?e0aXN}NYbpMjnRqOYM6s6!NK7!5Uf0^Aq4C@3fdiVE+vP*Bh)P*6|_urZM{ z@qY%bk^gYq6bw93Q1G7r{X<2`%tpR}Lg^)|@1^Z(BGRMyPD;mb^#q2OLVEpS|Cc1+j8|*Z907BZ+6yU-mlm|K`Co8;xIoc) z4Zx`rl(vOd;v-fTKDK;CU&Id>a>0v@+`Lr=+k`Pml#RA-g-lcm*;eoPSeY|_(-UJz zalw|wIzH)1=9?Pzc93lJ1iNk;omU3-=6RkSmLAG4_8tN6L$5;<92_0j%#bLf(4!Pp zzvouq{%8E}%zr=>RJ6Yj{u^Td-7m_2!nb{YXZ{oPng504|DgTfdq+i6Xoj&v_C7J?#_qau z8Z^zY8FVFwBk=(YP{g97=2vyDIl&%$*g^w0vM+^@^Z6F>rPzNF{&&v|geJur^@*kr z?HVDjlhB0HBLyrB$4B3721MW+iV3!3T9zQ()NcUvAB zCmkl?I}j#OyV6l{aD${6G1q%spW)%WjH5If_&)NDwf-%;&ii%=kxjiofLZ)iFmmO8 zN%)@x`*-~RC)tQQzo>eung$mk*FM_h(Km4ntx1}?xmL3W-2TNO)k(+5XpMg8eUM0J z$7C)P+OV7sV-K7hbj+o`Xl|*kCERu8f4FR&Y)d;Wd+h#^Ijvt?C0u~KRk9Q^PxFZ* z<8(KBdYx`KMbXhSSJl1TFG%W~N7igE9gQZ&NXJ?JC858jGFo9{XUXFF<#&fpYp1ou zoIRt~kjaHbs0Q-lrBDu=p&?y_)%-I|b2Xiw83K`%=#I4uXcDb}0H2A&5!1&7_sdj5+y%b~T z&H>y0yxZ6NC)1t#rY?!>J#SKad9Op)Np5>45PvL7LpEvE(zIzqevF2(lj2emiQTpd z&$xg5@Dqs`PKN?2_T+f2D>s#ZGskVPBlKZ{@Wo$-_<&ndb`=m~NJl0PS4DV=w=eBa zJ-^fH<)rhr)rt$BhbZQ$FE>FSjM18Mq*iYYSvK`Pw#cjjov5zcmx3$aIploHd7)1j z`Y^3J{UBTV^OEbn5Ij<@7h{{+!YE=l`> zZl%UIX6q9q>m#pOIm!`jsw#y1h%8mTAh`gD54j`wM|c1-hD~gKIq&&#SNS;I;^zksX87|iRW4=8+9Yg$PpYf&gQ~p~7poqdU^*!1 zDMsYCaz?Mg()6hK{H2*JpvJX*27Juf1xH#Jc`a`ObD=BE<4(_%SYOB z*Q>iu5l#aEw(Rn-_dD}^LnS)Ug@I3*Cs7s@Vg?LM-~DYhTmt0TDH!hZWC4G|S$)}}47okZ24p?FynUl@ z@84z+dUEp`gC zNKjBlze)%Gv_kTW-|b}@Mz3q{4wC8ihB!BNZxI*5D||~$!NJ#up|+tLzarPvLPMN> z@x!Z@Z%99P)4X=Jtk{hHZZF3|0H^Kr9XYIZZh12Nc?ge%`tKsxVPy3d@KmpvYI)6x zpsNK*F%9T$c|FAnYWN`oD)K7kPkh~xvBN2c% zwXAwBJ60O21{)p);9u_N+B!HoOiYwgLiZb0FXJgoYg(e7VEt8Y^e&Ydwt7cZiQX9- zD51TE+`~`^9u80*S0M${)PNs{Eob)+RGV&wk_hrGI?l7rjQ#F7D8Bf znnA-fLVSJn3bim*12sFnzf{zNkl0*L*Kk`BHAQrAdLsqZo?b|9+oeY8uakNy-I1^y z?U1?-0aqdH%qMZ6W2mHUYcgA>LySR#-Ai_=zbXVU62m*yV9Se}W#fY@<2|0(u#$eF zu0vihWM`(OFz zxpP9ambyMFYTDW>ufC{!-y@w3jXL`i_gAj_yq(dhe<7;b>HOs|vvnCMWA!g?{;ZHE zln8k~cIMt#50p0Vy_=^Nb&GFGxb5k=l~YIja4O<$cGgEcYh_80xsmJ?)#j55EIiB> zTJ+*q;TQLQej2L$*!By)q)wM~->DuNviIOy?Rgrk9auiO(7g%#>05Q2ER#PRb8fwzB&RO4?WICuvV(B(bFaLgyxU4KX zMQYW9J;dEfT+g1ZX%mea-k8E#`E>VLTw_9F9h}kS1w)RiC6`enR37eMC zqkzk?SkcL<$6v?reRgv8=H8I26EK`r)cE72clTAdDEVW-O2Y{m!uMeRx-l5Y%tFGq zQ0RVX3LDaCM?u+hSiv2*4*lan`#=dWaO#lF z4*YfnFU`lt+aT|p{df0Nd#!y}7G!%gKnf|3uv1<5+F`}Rz1N>>-@6y~Zl0W<#XIMf z_O!b%<7+EAb!&iK*Xrj%bXG~%?SRlBp*TE32+CC5VAOy`yB@WU4QKBI6~eN@I*nC( zG}AqR|F;LB!<2AoB+IowOMykPN0#7?81JUU5(i+suYf0DC#FoTcBLqI;ZOa{>Lx=x z%wsBa->;(}Z?x1>DAZ+7Uu_iJ&}BS$O&d5_-0idC?QOe(*M7`fp^|B<(z>Kw8R78~ ziFHEmiR90N?XpL4pL0UQqG)j9@g_f)kR9BICeGQEgw4juK(gAR=|^VObL|f+b@q+H zjW=4tQzf`XF3ABEj7o@r!-_GD_6P#or?RQwJX{YSSs6}Sem`5MM8KzAE;)JYrmdk# z8yGLu6XB7llq~PFqdRXvxt`&^L3_O8oNy`SzQ?%MehdgOzt$V1z0*Iog6W*MrlHB^ zc2E)wZI)h&_b|%WL6kWbWgSC2#TS1;i|JE&T#}IT%57@?+sT*_j8&39S{xle5-&1d zpCg$maH2XvvO&_usNKT<#JXAu?4oZDvB@>T(UE<}DITL8`4ZA-Q!x=%qeEJqoL4Et zn$Kp7D8w|5P2Xx`oazY< zbV&N{j2{C3Q-t`CXlt0aaU7iNFWI}pYF0Jj4O(wKN{qD|{d+5nZGr|qzqCZ^8_mn% zM!1Rwqv>*|=;@iJyZysrhq*}+N5Y?qreQ3xpWn7WNGNjX`iu{=3xe`8Yu;8;+huJt z^Ok+CQ8FJMFSY?@x-VndBy`yf^6u+&(Y`ggh#pEhw?TTHG0uOs)EC9 zF8CcQ!Aq5G5ikqQqZT^?Vhr}?J%;9iJ=0a0 
zpLw$L##o3C^Hufxr~7CzK~ko*#dPXrB}@oZgJh498l>6KhXE4Ell_>pnI{ zPi##;Mp9OD1bNQJYo3am#Nd*z=B@R5XtN0pG3efUxTaM0l{X|eQ8q)XgGHKf!#0&J z#ffTK(4qz$v;YQ*xOIP~LMQD=boC0E2W2x-t(?`Lu{Mh2D$*no%hD^adW*4DhojQ- zIXu(ry??3BziKo|$$6*TpQo8#9?l(!m(ZMX6L(I8M_yP*a!qdw`*_xM9uhcXrRA#R zy=+t#3O9~*o@{*(F zSa9IJh9nG<&BgH=m!C9rrj3ZvylhX#EXgie zspDwVl0Q|%2R~o(mSML$3VY$r>P}7MF()Sa0%=o)h07)!c3G_?uM}rDa}?%sE148z zwp35AITTQZQU^!O8)_yZvXukDvXng8HNIl?3BCA>0lPP&!NSz)W&G;wl$w$FCW3rM zhACUpBZihxr0c8k#I|0LV4dpF0ul*NWWS#`Z&WxqlMcI3T=sQQOT9$?-q zQauP=l-Wnbjd3)VOUoIu7B%yaQV`SWL~ejSPVLuol+_)aD3ct&`|1vxk*G*5)dF6= zF+=%D3Q#hb)%j3(JU5-0dr3}!kc4>8t}IdA!UdY{pdq_deL3tkEF^G^dXhdg zy)|bl6e<_rHyBb4;Pl&dV=C%8Sy`pgWG=cODLuJs&@5bJ`!&yFuFHK+cNv`!^}zub z7UVc0&tp7?>(%MjtB)+TgF?A#)@>^yr6M&`znYRP5e3?;OTHQ6&|d_%u4}L!`>lycbLf+$O+AIByCJ%}no{9m=?km-U>hgn?_ZPDK*uRjyY}oKpfK1FPwEXpt_=Mi8Ymj+qiZ z)+7?PY`3SwhU5+<<-xs^+3k-s+P$u=6|E9!9?$jLf2`_beyVf|@7g+iQ(9wk2?++< z>W(S-Ta-G_m?`U`%+DmP<*M;q@N|k?@puC2i0)4X(Q4!EjyCd#jmv{{K$SwIxH6F& z%7Cg#==fv{IzR9w&X%+k%SX2YRG3tcUb6{c`%?|&d zm!NMc21PVq8#Y_*jTm?oidI)wF$uKf$=gquq$)qQu>HQ*cz_m;^!VRi{_BgYAbra0 z`d>`e^_&Thch0;u)`8^+tzOq8@Xh5&RM2Tfs2_b@tkIry*Alhec^ww;sPLpT&jINk z!YSgU`=y(vBFW}i+maBzYp7dS=hUJbUIHrPxKCj+z!jix8PdsynElG7;aXp(Ua)w& z)F+x+RhoidNav=3_`$}P+Nohdp;O`B*&}5GSPgu#B2cEa+d9#tTFHf_YXcgyW~jWg zkAf6uTdIK}fi^O;HK~<#{-;^B$KX=hYojx%NW4=SV)gxSk=$nsxNB0Tf=%?+$;Zd& z{t)jc=L)EubF*PjdCE)!C|Ja@0KiJGsL2W;EXa{U;L1r{sVPCA=@H@SIF*PvYzIR8 z^lR7Vi>_dcy&<1EjXsJPvN)^)#NGj$5yk?-L=mz{*swDgBJ=$idwxfNqXIhLy<`Q;NjF$>B! zW0PfME?$X{htSa<<0m{H8yywr24poxc+1V6LcL|qUz+5FulCT_G*2Pu&pdeu?E0|{ zwDi7Ugnf^QMf$sS5^2y#z0^hRI=sIu^CU$^W?$_ou z7bkGVVB#g6R4|~~T+8~ZGwESQuS92r?;~n$z}#sWLdp0GEqcEzo*Z&{;t}2hHxoAO zKP%9D!;7#s7dxY#fWd20UWGMIL#|hCZK%8Nc%=?Y-fC8t3@En|PhODd260-k9(tMFlE*PyfEk558K&DwdhvA1VOvjrE31vMS$-n!(L-tH>=9yrd zs&I$SpzTaDU}{8DD%eozO0YW(yon1%OY)Nh-e%!iMZ{S};PoPR&rRSWoj`l#XELMq z5~kkghqMZVqKlPit5y3He#!#YW*evZ<)^_0SNkt257(m0!`=-hd$W)*V4_Jpu1DYz zoK2bJy_%9*Hh05BFRLh|*0CIiui8Qt*xjq$eyK|#VlzBc*$ z0}X)Zl9Sg*8^kvR)9ThQ6M5#Je`_08L6utDvJrdh(&mAX_-SaPmM(Xx`&BJdvNw9!#MKjT>NC>Z+LU{412sOs}Om4TcQC{{2pA4eItihb_g zExVRZtt$3%o$>KAdF*rE)e64umNVT{r~{P;wdDGD!NM0()Aa z&&X>0F+JwViT(T9xW~)_H?L#Z%`GnZl+q9Lbnv~#D^$o=WMmVY+1SouHD=I|lPnFE z@*b4;)iHC1!2^>!S~0G^y##kd4^5w4SkZd#4WA#5^^_s40aIpIkmrz2iX`BhJ$U)AtzDz-4k_O-xL4 zygCs9es`V@;4v4!zBJTS2oTZ*`-3SfQ!aj9>;TVO+w_{5jL~msKQ=NdZ2?URyIP<& zo@8>~yL5*23b} zKHkjx$u{eX+L0-4ni+;-Fi=&Ks(UlYF3`0bLA!~zJ7f?&G~Nt5-Bcfx!FeJF*7#yY z>`IJoxe((5e_4QuiKVK%uQ|OX{pLKt`&;jz%f=ESP~GwB{9K$p(Bx6Y@D{B~^mC%o zOiMeywS6UX`KyDL)S46^SL+Q2XYl&dNx(kRIm=ow=Dh+fs%V02ImcmGs>dgXBF9^+ zJyx^QhMd?Gvp5zUPUza)d2M^H6Te$QDyI@E`j1J_2;q<)3YsT#iS{e|#z?z@iQfGQqkZP|Wj>$>HP6laj(XpP}O z!kZ6U1INGOi>k6Z80-_$=?$a!_Xei*KSI`gqaGyCQ3r1(C)Fhff=DygV|8A_f`e-w zL7A-IUq{9@hTPF;dsh$E6qmWv<($@R+(%XTZ?7~63*eGEv>%+G@p+%O`rpzVk@3H_ zby%&-Ej_x=7C-oDCNE#SyIWFN$7LP-BfMtW&;OYLjSYAt6N#}3k`l{9V+{jy={@k?n`APAqNEVqTbJ!iyrIWW3)* zMltGc>V0pst$}73;LsH4I=V-&{QcDg#Pn~4acd-0Li%+$r9aRdUs|a}n$lpTa_3^Q;ZV497x5|_ zp%#bt*atWk(GYqV(UA!3-CH#O4Tm9vL+$>(htl(cEF(B0Z|{y?<XutbW}+Go5SQefwsT z3x-z{Z!M_{Rf~&DuTj4h?rwsuK5QKBGoJa`bKTS@-l*KC5!gU$Oo{L?2d4sA?It#^ zid2~ErK3PA<}&Bu6{xMCgW!srI%(MoL5biz>ETn;b{2OC#w72Y|GYMa?d0ljz>z(q ztHa$b0-7ie2_%@KDklpv)nwv`3CcCUy4MWjh zUv*=uui(UgeZ-fHZU-`!KJ2jIylGWQ*plIXe)@CWc9W7?y#KD~KEhc2_F5}aOlkFj z`*bzEHTYb+pzhgjh)96~oia{PN!q990!?(Q@E%3gGKr~=C8@(J)0NQEy%R!L!Op0( z%~ejs=-k(e(kk;x??fWHIInH*GoS!8L2bV0sA%v0=9X^~+-l;;^lrtv(yycxn$Pp~ z54yLKl2P!OxG}@0BM~Y5J+fv)2Be-VEn_r8SNrh8;Fw^WQGch1Ueigl{5H5#^^1Yz 
ziK7pN(8{U5K(1DKVFpa=yG|cgmE+{7J(y{vawfpv;|v{}X+eiZ)Jl%-v`%tqsfcvX;-JP=T86(XJE9lCmTSu#fC|+>>pLp%wA^V~u-XB7d#*>P(>K1C*6q^dxs_2vR+L|)=Bdrp5%A?zI zj%1xK4V+SyA7Y$kZcm}}eyZoE6Mgb#XsAVR%QdiPMuadI7tcdSLKr{Hiz{A;vw8#e zU*8AKu`FAi+j5i@N;Fw)eqJ`n<=KK1*{vjbogiZaxQt@IA>T!}#LD~5y+!$6$AUUg zk8rXu**$sRwZ~e@1{TK|vUjO1Nis|b2rR=i+jF0BjfLd~RMHha2Ym1G06|^f!-0u3 z7g|ddFh@i6O$z?htvliU6*_y^+q(VL4gE)W35$^^!`RPV74%POO7+l09Ea~A$-J_% z!re%vRS<C?Zt;#{PXtQEcQfgKRb5e{M>4{tf;me`O~EBsORRI9!{=Tnn)#7(x%XR=Eh%t8EnWJ;!>ta)824$7E|^Hr96U*atGhio}j@!GM;%e9fawG zpfRefCque$7)bXC6|Dfs76mNy=!}V{=DKBm8)0kKhfjUZg9hxw9?$6cVgrTmN9$6z z|01cKp;j>1}IK!IGtmN_}!(M*LJLa_PY`$EJ>Rowy2_AN=Gz#GhBu_Rt2( zl`D0&z~xNM`h+>5?&8&(`KKi%LGq_ZygcsH7j!yj`v#1W_^psG+~1%lVPTO_G`v?N z6|9ucvEILb`e9untKCF47KpBnOqWT$iId`;fjxIfFP|s<8s2$wrYXZJzt3oK4J|($ zKK6lEG!a09p@TYUldqe!%DdJvXD3;z*c|6jmd>l5hNYhg?t*MmNqD+c)VAMni6EYR zx;D63Nl=N_Ke@}ht)x7RmXmteMsPTof2G+ zwarndah%H4`1snd;PmQ(p-bP|$_AVcMsQdUg;%bXZ2)*X6d8G~>`FW8q^qVRk$yE1 z4@ty=M?c91z1cej*S2**6B$Hj$>S66FGodLBtcPltUCDR$~x~%&h{SQn-@L_l{-|) z>1Cg!#0=uY(se_WS%^5FqgEsR_LVdPmffvgG401d22Lwxvq1YlY)});wIHOakKSjJuYt!_he4E&3Ci8m6 zp8QP6M5X)KO>s?uWm47V(>It@kYAS;3zWmc!n7& zo|`wq{=J=e?c!M^5DI3W+^;hZFJHK7+O1<$dE4sVk@duj|Ex`RK1I^RJ<2a&1rW(=MdeYmHw@OOhu6dr{0I?*rbzT7ZuuzK% zf1kc#Y_iBVs}tgv(OH0yB?fJCTrDKLx!NM4>Jt0o3BXarq4e1ul0R_Q#taBME+Lg! zgTv%Q^~tQ$&R^>4@2_;@;5Vky@?0ekE3-x!j=4Mxn|zock!?dkeO+(?qb@z!+^TtI z@CHJmT&l|D8yeK{r=Kb7bjcLtmH6lDj)>Z`+N}ZHTr}<`Lm|FB3y-PxYTT())1H{{ zFVTkHT^c2fEcZLjG$2;Z%HJI{dhV0Dc^Aeuir6-4L-UTueGMhb#=g8)A{EkFV{C+# zUlFIb&w<~5-qyYOxcyB<6X~Ss6T^zF@~fDdt+^mR)&3TTY?n9b!QX<;25rIWDwU0< zp<)vX7vsnO%nJ9+jC zbzz=P%St0dtf|2j#zE@+TUdG7_ItU)8%_b{MeW-yYm=&iL8Bpun{e&V_Rl9aIURny zW69*fI!SHAO7tCGEiB>CY}{B+zQI)G4fv(hvP9L=A&PF|&(b_sEO8q$5D8!fElK}s zR44*W908tPXW*xUsuqgCYtJMXVciTJ@odd4BQh=sxe zC;xzfJS72&?ZQ)V9t;?44YD_75$Gm-FlZE zKf;uF$>X$B8n7Jz7^jG(56Bc}3?%z>JLn8IJAb}C?WQ?S-6S)7(6Op>(rCy@^1?7= z$s+g9P%pus-kRz-H9fub`(L*!342WEw0l;g5<9^BIxIIPGyFv9p=z35YBCOzeE(Hl zk?RUzB+L;*N6@LWa}KG$nX=3wpKEa2YYakIv8B8kHFh14g(E@D8+MgyshzoJ(Tj6r z`e4{W?*uY=v%X;}xmFmg+3pxPkU^@pR_eUaeo_vps5iRr#{WsgWA!nQ{yZW+zas%) zRaHIU(7zyXOEUpEU32PgaSjdq=5us^Ty{-%sOi1t+L=VlXL=3IU4fEqZ(swM?0E6#w-?(1#6cGE#V7ASHA;zvBUau7OT9e!#ez{v`RQ=IlF$iB6x^^WSNeSgT0sCsv^C6lj74IQ7%{4fjYoj z8buCX-C>D)gdVz287(7N)1~X5uvr zIj=!&u8TB2!hXVOxP_a49*W>C6SntdLim!WB6T0$Ym!}+&LuE)nmU@; zMhpAFsplnavOC9YO?xT%ed1~OK06u0ghb7l6?QQ3HQQ#ENCg(Gm0lM8K#LkS@Iq$2 zUqZ-UN8*VK_wFYe`2%+Qzw#cVRoun69H`AuSl8HD^mh+jHHk{APPTHG_?qDB6a)4QPMV#)kUTmSgJRk-%_Su z2Ndpd4mQ;{vB#kpeM?@EaWJZLW}!UU3F2GZZx0$MtI^7Vl{!f0c=yuil3CAR6%gsd zN6l%p+0z8gxByw8Aw;xuLI(u)$Z~l!GV#9R*FAtY*>p5kG=$`L z4~BHmUiG<`#VO4!z~r}m!bFBw&tZ-d%f6FteC@x&(-Kq+)abN8frd8$hF|*qL%$05 zu-d~)7HAe7{mBYB(gP0lMfUH1zF)R}H0h@6%0;Z&U?`KBGn&zxO}$cBAqPyp(G4x0 zq!#zA>@+Q9f{k=e9Pm0kr{W3muA8SmJ{63s)prWH*nTwni9lA$POe|>XP(Lj0M3gQ z?CO}wIQ0Ek+Q#B?6EO`8yl=2;rBL2r6bE_v;?EUO{ z595ytwQvuvANF8*TuMc-31H&w3xVb^WC0=ZPL_)ruadt(&~#0iLs|iPbo_a@|BMTG zCZ#FV1_P&A=c6Gz$HTJxWX|TTBf=5)6)AVP&OkWER-qVc#%>GUt&wT>R+5Y(Bg+WS z{QJYEsQ6-RfYpvq>KnW8_aENwWYwrzBFn=b+1jT98U*WK%9s@QT#bwG>7C~I4c(i2wpz|7|Kg><)+fSu6nrpZy?`CoPMBzq=ZsI%{ z58s>ob?GSH{Y%l#~Tz|l!6)FK$nOP_k{xTH$!M#e)<(0&~fc8oM^en zk7l&FmVx1L*1c?4E%>2+v2;J4h~pI#7Un55#ExhZwNBHbQ)C$^8Q%5Uw$W8qFTE(~_sea&hUz3DX{Q>9lN^y^w1>dF& zoKuKT+c$xN?xDNeBgw{aIRS!O=y;a_Dwclu&p@?hh_bzgU8Ku^?AMX41*MJ-1I*ck zS0~Pksv@zZQE|@EQ?pGeV6wirz?p{mfWl_cFj{76FaM$T?;8qtgm;7YW*pqE*dtj2uvCb}j zy+ac)IP&U-*>x!BR`Z315FfKf)CX5RjO$Iv@0%o-JI^W4pS@|HxZ-9>y_tCNyuX_L 
zS(GT`;-Wz=W{nKLdr3m%QFy>cd%Ch?$@FhLo&zU=2qK2kEby3OoZpnNZ)sWH((@p}svcAK!x3>`JwvnlKGlmp4uee@Sa<~8xTARlKrVg`e$Xdk03%5(bGbN|qQKw80kXLe2{4t1k1efgA z*mR!$tRW9)OpO!_#YHS`=8c(@p9#DC^)aIe_Q-4j%E-i4EwZxuICq73^owGoYGf>* zGa3Q<)+W=#Y9@4k&W)*8*ekU)RCS7hBIcZ-Yg8b)OPS zSG<%72ImKUu)`tKih_m4tioG6ShdW|TP}6zoj6y{qZ|R*kRG4D+k+ z7K3miA8ekL;`(f0NktOSNS^Ag?}yfJPqs-Tt3>CtTcVf5gm)aDQ@xm2u%A~^pZ+PR z;_KULzlH$KZi;%}@@aqJPjBl`NvNo7FnURi`EO&Rry(;WhIzj2 z0N&Iu0M=BXO=X6|&pJTKR0dj(>mNV`_+nINrwRUHc%;1R%0Gov%sC4%CsJxshWQvx za$$wYxv|Fc z#k@eIw-7c(D!^27K=Q7LGb735BIfPm)N;M0N)D;Wa3YRRX<1XNMT${*o$J+z=mgSI zpIBpzWfJ*YuCBRvb@hD;aYhG6e;J?rRBmSZ=LYwssp+jr?6V=WH#%3>sk-HE!R_IMX33jv#MMo7kQZOErwx zCc*^{Ika}$bH9xRI+t<8<3u5{Ru-FrRBlW9~lEihlBPGR+g@t>ZAh)8@@$-w&r!OR$f=a!pt%}((x15pydgemmIoRPrENzN`u z5d>s@QdW<(lj=*6BJ9?R#I9TtF5t zY2Y6Y7Ip-_YDJia`hHzUI8b46nO)1Ts?2xRlARFBRw1{7Eahfqr}sP$ue#(2dp`8t zwf14jl`w{>IM#uYG z^M_(jL5%z?HiaAKX6)wY1rF_zK&*+p-k+k4ywy^hq||H+TJ;wlyEdag zB1w?FPxNMeo!-ZJyhOGBJm-%UoeOj7kBE28HpDziW!z-iMy;91u%|pe*?1a9edoJb z8sxPh<|4-SPMvGRPniD3UtiE41i#E{>{NToGbhz3t-8`{`IVcj5Q^JuNX+&R(jVsA z@V&tSf3Dg6A0O9O8YeC|gI+$JUwOt8m)Z97DcwLJtMSXl*{VHs>A1XQs4P@ zk>;HF_8#~7cuhmtP+??z{Xf5B7)KVfXCZq69KcX zG#<8YBOoo*ZLKx#N?vUJTSbmGTIyDBb2@!u?<`-~|DCRL=h^XfjDCyuHVB6aEqh#vb{b&I+mNAaAhzi+CfYkKC3nofZuQ#6ruPjs+R5&jNI~-lve{$VFJ~i=i z9PX*vc$Z#g_=Zno4TqHR(bzcwrc^%@Fl+b^H!{cN*Pnq2Y>=ULJf9p7J9YvHZpfDW zo@_^Mlp$~Ky7xD+TVSBYn5y{?6;_o4O9&|kUw1XtN{X}a-eW%Es z1ot_zzd-(m>7RBx9qmbqlDXY2!tXV=g%?)2LxNQ4w~7|>x0<9sN)P(6 z^640uRyFq6gkH_O`jj-Uw1tj~>F~F$08>MHSp!eL{@06=xwga;7I!8Ui4m2`TF-0I zT>t%iVIQ~6wWbyh#xhLM<-S5eeeZs)Y|f6pLo^(?0%HAa4t}X2mQq&5;B_ zIw?XGc#FAEAgkLys-}ASX4N#Vvw0AQxDbd9mh^=uha%BMzDE*8q=_<0J6(`Q%;_;G zRjRBl)BlKtTnH8U{C~s$PtAY-cI*8&{Lc=>{~1g5{ayOsKEnTmND}>h@Za$N?z6~i z`aivU|JejdZ}xZozv2HC1^(ZfApZ|Y{)_g%sD9mf#N$FC^i4WwwPyTheMMQdcQw*x GVgC=t-J44Q diff --git a/docs/theme/docker/static/img/docs-splash-colhead320.png b/docs/theme/docker/static/img/docs-splash-colhead320.png deleted file mode 100755 index 2f6d1b693d5e909c2e3fe0dec48c4be88ad68028..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1313 zcmeAS@N?(olHy`uVBq!ia0y~yU~~YowK$l7r0*-yQXnN+;u=vBoS#-wo>-L1;Fyx1 zl&avFo0y&&l$w}QS$HzlhJk@uB{L+VB*NFnDmgz_FA=0huOhbqsGEVo#=fE;F*!T6 zL?J0PJu}Z%>HY5gN(z}Nwo2iqz6QPp&Z!xh9#uuD!Bu`C$yM3OmMKd1b_zBXRzL%C zQ%e#RDspr3imfVamB8j&0ofp7eI*63l9Fs&C5WRUd;=7m^NUgyO!W+OlMT!a70gWZ z3{4CyO)Pa3j0_A7^bL*l4a{{74XjMftqcqmpg;*|TTx1yRgjAt)Gi>;Rw<*Tq`*pF zzr4I$uiRKKzbIYb(9+UU-@r)U$VeBcLbtdwuOzWTH?LS3W`avKTB%1XJkii(hGOE?jkSNl+@ny;uz{ z4yi0i)elN7&Mz%W21Z<(GRUd|E9aur#FG4?ko^1{SSSW$M8GLcsI`V!{(HkONQpsd>QkUIa|oejwV{RBuFQR?-uC_50zigxK(Y|?3;~e?ZR{3 ze*fKX-^V}w^wM>&`5%;8&Gn0%_FwMbKHYj-_4BX4R$0jSsZB21efRZOAAz_Pt5$vf zx#w zfVw|c+{xNn$711l_ifqAJ1#crMcnx_o?OWndm>U)_QdAdhKhaORhtw0-4;*T@a2%S zmx1v47L!2Vg!%39jOQ|?EU(UacUCDk=)*iAhzb}jCG%^PdPl!NVssIfsM_?AS1z{fl=FJ zL8Buxvy{Yz0}C1%d3k_>%*`NKFi%QCAmIQPFGv=sg+*e`g#*&RL=qSTEM6?l+KLZ*U+IBfRsybQWXdwQbLP>6pAqfylh#{fb6;Z(vMMVS~$e@S=j*ftg6;Uhf59&ghTmgWD0l;*T zI709Y^p6lP1rIRMx#05C~cW=H_Aw*bJ-5DT&Z2n+x)QHX^p z00esgV8|mQcmRZ%02D^@S3L16t`O%c004NIvOKvYIYoh62rY33S640`D9%Y2D-rV&neh&#Q1i z007~1e$oCcFS8neI|hJl{-P!B1ZZ9hpmq0)X0i`JwE&>$+E?>%_LC6RbVIkUx0b+_+BaR3cnT7Zv!AJxW zizFb)h!jyGOOZ85F;a?DAXP{m@;!0_IfqH8(HlgRxt7s3}k3K`kFu>>-2Q$QMFfPW!La{h336o>X zu_CMttHv6zR;&ZNiS=X8v3CR#fknUxHUxJ0uoBa_M6WNWeqIg~6QE69c9o#eyhGvpiOA@W-aonk<7r1(?fC{oI5N*U!4 zfg=2N-7=cNnjjOr{yriy6mMFgG#l znCF=fnQv8CDz++o6_Lscl}eQ+l^ZHARH>?_s@|##Rr6KLRFA1%Q+=*RRWnoLsR`7U zt5vFIcfW3@?wFpwUVxrVZ>QdQz32KIeJ}k~{cZZE^+ya? 
z2D1z#2HOnI7(B%_ac?{wFUQ;QQA1tBKtrWrm0_3Rgps+?Jfqb{jYbcQX~taRB;#$y zZN{S}1|}gUOHJxc?wV3fxuz+mJ4`!F$IZ;mqRrNsHJd##*D~ju=bP7?-?v~|cv>vB zsJ6IeNwVZxrdjT`yl#bBIa#GxRa#xMMy;K#CDyyGyQdMSxlWT#tDe?p!?5wT$+oGt z8L;Kp2HUQ-ZMJ=3XJQv;x5ci*?vuTfeY$;({XGW_huIFR9a(?@3)XSs8O^N5RyOM=TTmp(3=8^+zpz2r)C z^>JO{deZfso3oq3?Wo(Y?l$ge?uXo;%ru`Vo>?<<(8I_>;8Eq#KMS9gFl*neeosSB zfoHYnBQIkwkyowPu(zdms`p{<7e4kra-ZWq<2*OsGTvEV%s0Td$hXT+!*8Bnh2KMe zBmZRodjHV?r+_5^X9J0WL4jKW`}lf%A-|44I@@LTvf1rHjG(ze6+w@Jt%Bvjts!X0 z?2xS?_ve_-kiKB_KiJlZ$9G`c^=E@oNG)mWWaNo-3TIW8)$Hg0Ub-~8?KhvJ>$ z3*&nim@mj(aCxE5!t{lw7O5^0EIO7zOo&c6l<+|iDySBWCGrz@C5{St!X3hAA}`T4 z(TLbXTq+(;@<=L8dXnssyft|w#WSTW<++3>sgS%(4NTpeI-VAqb|7ssJvzNHgOZVu zaYCvgO_R1~>SyL=cFU|~g|hy|Zi}}s9+d~lYqOB71z9Z$wnC=pR9Yz4DhIM>Wmjgu z&56o6maCpC&F##y%G;1PobR9i?GnNg;gYtchD%p19a!eQtZF&3JaKv33gZ<8D~47E ztUS1iwkmDaPpj=$m#%)jCVEY4fnLGNg2A-`YwHVD3gv};>)hAvT~AmqS>Lr``i7kw zJ{5_It`yrBmlc25DBO7E8;5VoznR>Ww5hAaxn$2~(q`%A-YuS64wkBy=9dm`4cXeX z4c}I@?e+FW+b@^RDBHV(wnMq2zdX3SWv9u`%{xC-q*U}&`cyXV(%rRT*Z6MH?i+i& z_B8C(+grT%{XWUQ+f@NoP1R=AW&26{v-dx)iK^-Nmiuj8txj!m?Z*Ss1N{dh4z}01 z)YTo*JycSU)+_5r4#yw9{+;i4Ee$peRgIj+;v;ZGdF1K$3E%e~4LaI(jC-u%2h$&R z9cLXcYC@Xwnns&bn)_Q~Te?roKGD|d-g^8;+aC{{G(1^(O7m37Y1-+6)01cN&y1aw zoqc{T`P^XJqPBbIW6s}d4{z_f5Om?vMgNQEJG?v2T=KYd^0M3I6IZxbny)%vZR&LD zJpPl@Psh8QyPB@KTx+@RdcC!KX7}kEo;S|j^u2lU7XQ}Oo;f|;z4Ll+_r>@1-xl3| zawq-H%e&ckC+@AhPrP6BKT#_XdT7&;F71j}Joy zkC~6lh7E@6o;W@^IpRNZ{ptLtL(gQ-CY~4mqW;US7Zxvm_|@yz&e53Bp_lTPlfP|z zrTyx_>lv@x#=^!PzR7qqF<$gm`|ZJZ+;<)Cqu&ot2z=0000WV@Og>004R=004l4008;_004mL004C`008P>0026e000+nl3&F} z0001xNkl$Cw%(46SJctNH^mn=q07@yJ8~+9XcPCzfBYN71 P00000NkvXXu0mjfh3RP# diff --git a/docs/theme/docker/static/img/fork-us.png b/docs/theme/docker/static/img/fork-us.png deleted file mode 100644 index efb749c1f98a6c3cee2a9c6fefdd2adc422c36cc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 380 zcmV-?0fYXDP)B@e5;^o!W6b3#920;d41~E0W^}S!8|NZi% zy(rwlfkA>nXi08t5Q6~7kzmNcWURb<(vNq4-hHk)BnlF_7;;h#;tRMEP1Qw>-`@QF z`e)<+$p7x&_x?Hj-xx^&NQs-`(x%T({{H;+@b|4hKu7*Q{a+U)a2S}iu6%X= z^@X4B|NnoubD|tt_%Sev@T%)+J83)J-FI$-t|bF2ngWO-24)84u1G6W24)}+C*WgX a0Z{-X;MVufC7MJ40000bkhN_!|Wn*Vos8{TEhUT@5e;_WJsIMMcG5%>DiS&dv_N`4@J0cnAQ-#>RjZ z00W5t&tJ^l-QC*ST1-p~00u^9XJ=AUl7oW-;2a+x2k__T=grN{+1c4XK0ZL~^z^i$ zp&>vEhr@4fZWb380S18T&!0cQ3IKpHF)?v=b_NIm0Q>vwY7D0baZ)n z31Fa5sELUQARIVaU0nqf0XzT+fB_63aA;@<$l~wse|mcA;^G1TmX?-)e)jkGPfkuA z92@|!<>h5S_4f8QP-JRq>d&7)^Yin8l7K8gED$&_FaV?gY+wLjpoW%~7NDe=nHfMG z5DO3j{R9kv5GbssrUpO)OyvVrlx>u0UKD0i;Dpm5S5dY16(DL5l{ixz|mhJU@&-OWCTb7_%}8-fE(P~+XIRO zJU|wp1|S>|J3KrLcz^+v1f&BDpd>&MAaibR4#5A_4(MucZwG9E1h4@u0P@C8;oo+g zIVj7kfJi{oV~E(NZ*h(@^-(Q(C`Psb3KZ{N;^GB(a8NE*Vwc715!9 zr-H4Ao|T_c6+VT_JH9H+P3>iXSt!a$F`>s`jn`w9GZ_~B!{0soaiV|O_c^R2aWa%}O3jUE)WO=pa zs~_Wz08z|ieY5A%$@FcBF9^!1a}m5ks@7gjn;67N>}S~Hrm`4sM5Hh`q7&5-N{|31 z6x1{ol7BnskoViZ0GqbLa#kW`Z)VCjt1MysKg|rT zi!?s##Ck>8c zpi|>$lGlw#@yMNi&V4`6OBGJ(H&7lqLlcTQ&1zWriG_fL>BnFcr~?;E93{M-xIozQ zO=EHQ#+?<}%@wbWWv23#!V70h9MOuUVaU>3kpTvYfc|LBw?&b*89~Gc9i&8tlT#kF ztpbZoAzkdB+UTy=tx%L3Z4)I{zY(Kb)eg{InobSJmNwPZt$14aS-uc4eKuY8h$dtfyxu^a%zA)>fYI&)@ZXky?^{5>xSC?;w4r&td6vBdi%vHm4=XJH!3yL3?Ep+T5aU_>i;yr_XGq zxZfCzUU@GvnoIk+_Nd`aky>S&H!b*{A%L>?*XPAgWL(Vf(k7qUS}>Zn=U(ZfcOc{B z3*tOHH@t5Ub5D~#N7!Fxx}P2)sy{vE_l(R7$aW&CX>c|&HY+7};vUIietK%}!phrCuh+;C@1usp;XLU<8Gq8P!rEI3ieg#W$!= zQcZr{hp>8sF?k&Yl0?B84OneiQxef-4TEFrq3O~JAZR}yEJHA|Xkqd49tR&8oq{zP zY@>J^HBV*(gJvJZc_0VFN7Sx?H7#75E3#?N8Z!C+_f53YU}pyggxx1?wQi5Yb-_`I`_V*SMx5+*P^b=ec5RON-k1cIlsBLk}(HiaJyab0`CI zo0{=1_LO$~oE2%Tl_}KURuX<`+mQN_sTdM&* 
zkFf!Xtl^e^gTy6ON=&gTn6)$JHQq2)33R@_!#9?BLNq-Wi{U|rVX7Vny$l6#+SZ@KvQt@VYb%<9JfapI^b9j=wa+Tqb4ei;8c5 z&1>Uz@lVFv6T4Z*YU$r4G`g=91lSeA<=GRZ!*KTWKDPR}NPUW%peCUj`Ix_LDq!8| zMH-V`Pv!a~QkTL||L@cqiTz)*G-0=ytr1KqTuFPan9y4gYD5>PleK`NZB$ev@W%t= zkp)_=lBUTLZJpAtZg;pjI;7r2y|26-N7&a(hX|`1YNM9N8{>8JAuv}hp1v`3JHT-=5lbXpbMq7X~2J5Kl zh7tyU`_AusMFZ{ej9D;Uyy;SQ!4nwgSnngsYBwdS&EO3NS*o04)*juAYl;57c2Ly0(DEZ8IY?zSph-kyxu+D`tt@oU{32J#I{vmy=#0ySPK zA+i(A3yl)qmTz*$dZi#y9FS;$;h%bY+;StNx{_R56Otq+?pGe^T^{5d7Gs&?`_r`8 zD&dzOA|j8@3A&FR5U3*eQNBf<4^4W_iS_()*8b4aaUzfk2 zzIcMWSEjm;EPZPk{j{1>oXd}pXAj!NaRm8{Sjz!D=~q3WJ@vmt6ND_?HI~|wUS1j5 z9!S1MKr7%nxoJ3k`GB^7yV~*{n~O~n6($~x5Bu{7s|JyXbAyKI4+tO(zZYMslK;Zc zzeHGVl{`iP@jfSKq>R;{+djJ9n%$%EL()Uw+sykjNQdflkJZSjqV_QDWivbZS~S{K zkE@T^Jcv)Dfm93!mf$XYnCT--_A$zo9MOkPB6&diM8MwOfV?+ApNv`moV@nqn>&lv zYbN1-M|jc~sG|yLN^1R2=`+1ih3jCshg`iP&mY$GMTcY^W^T`WOCX!{-KHmZ#GiRH zYl{|+KLn5!PCLtBy~9i}`#d^gCDDx$+GQb~uc;V#K3OgbbOG0j5{BRG-si%Bo{@lB zGIt+Ain8^C`!*S0d0OSWVO+Z89}}O8aFTZ>p&k}2gGCV zh#<$gswePFxWGT$4DC^8@84_e*^KT74?7n8!$8cg=sL$OlKr&HMh@Rr5%*Wr!xoOl zo7jItnj-xYgVTX)H1=A2bD(tleEH57#V{xAeW_ezISg5OC zg=k>hOLA^urTH_e6*vSYRqCm$J{xo}-x3@HH;bsHD1Z`Pzvsn}%cvfw%Q(}h`Dgtb z0_J^niUmoCM5$*f)6}}qi(u;cPgxfyeVaaVmOsG<)5`6tzU4wyhF;k|~|x>7-2hXpVBpc5k{L4M`Wbe6Q?tr^*B z`Y*>6*&R#~%JlBIitlZ^qGe3s21~h3U|&k%%jeMM;6!~UH|+0+<5V-_zDqZQN79?n?!Aj!Nj`YMO9?j>uqI9-Tex+nJD z%e0#Yca6(zqGUR|KITa?9x-#C0!JKJHO(+fy@1!B$%ZwJwncQW7vGYv?~!^`#L~Um zOL++>4qmqW`0Chc0T23G8|vO)tK=Z2`gvS4*qpqhIJCEv9i&&$09VO8YOz|oZ+ubd zNXVdLc&p=KsSgtmIPLN69P7xYkYQ1vJ?u1g)T!6Ru`k2wkdj*wDC)VryGu2=yb0?F z>q~~e>KZ0d_#7f3UgV%9MY1}vMgF{B8yfE{HL*pMyhYF)WDZ^^3vS8F zGlOhs%g_~pS3=WQ#494@jAXwOtr^Y|TnQ5zki>qRG)(oPY*f}U_=ip_{qB0!%w7~G zWE!P4p3khyW-JJnE>eECuYfI?^d366Shq!Wm#x&jAo>=HdCllE$>DPO0N;y#4G)D2y#B@5=N=+F%Xo2n{gKcPcK2!hP*^WSXl+ut; zyLvVoY>VL{H%Kd9^i~lsb8j4>$EllrparEOJNT?Ym>vJa$(P^tOG)5aVb_5w^*&M0 zYOJ`I`}9}UoSnYg#E(&yyK(tqr^@n}qU2H2DhkK-`2He% zgXr_4kpXoQHxAO9S`wEdmqGU4j=1JdG!OixdqB4PPP6RXA}>GM zumruUUH|ZG2$bBj)Qluj&uB=dRb)?^qomw?Z$X%#D+Q*O97eHrgVB2*mR$bFBU`*} zIem?dM)i}raTFDn@5^caxE^XFXVhBePmH9fqcTi`TLaXiueH=@06sl}>F%}h9H_e9 z>^O?LxM1EjX}NVppaO@NNQr=AtHcH-BU{yBT_vejJ#J)l^cl69Z7$sk`82Zyw7Wxt z=~J?hZm{f@W}|96FUJfy65Gk8?^{^yjhOahUMCNNpt5DJw}ZKH7b!bGiFY9y6OY&T z_N)?Jj(MuLTN36ZCJ6I5Xy7uVlrb$o*Z%=-)kPo9s?<^Yqz~!Z* z_mP8(unFq65XSi!$@YtieSQ!<7IEOaA9VkKI?lA`*(nURvfKL8cX}-+~uw9|_5)uC2`ZHcaeX7L8aG6Ghleg@F9aG%X$#g6^yP5apnB>YTz&EfS{q z9UVfSyEIczebC)qlVu5cOoMzS_jrC|)rQlAzK7sfiW0`M8mVIohazPE9Jzn*qPt%6 zZL8RELY@L09B83@Be;x5V-IHnn$}{RAT#<2JA%ttlk#^(%u}CGze|1JY5MPhbfnYG zIw%$XfBmA-<_pKLpGKwbRF$#P;@_)ech#>vj25sv25VM$ouo)?BXdRcO{)*OwTw)G zv43W~T6ekBMtUD%5Bm>`^Ltv!w4~65N!Ut5twl!Agrzyq4O2Fi3pUMtCU~>9gt_=h-f% z;1&OuSu?A_sJvIvQ+dZNo3?m1%b1+s&UAx?8sUHEe_sB7zkm4R%6)<@oYB_i5>3Ip zIA+?jVdX|zL{)?TGpx+=Ta>G80}0}Ax+722$XFNJsC1gcH56{8B)*)eU#r~HrC&}` z|EWW92&;6y;3}!L5zXa385@?-D%>dSvyK;?jqU2t_R3wvBW;$!j45uQ7tyEIQva;Db}r&bR3kqNSh)Q_$MJ#Uj3Gj1F;)sO|%6z#@<+ zi{pbYsYS#u`X$Nf($OS+lhw>xgjos1OnF^$-I$u;qhJswhH~p|ab*nO>zBrtb0ndn zxV0uh!LN`&xckTP+JW}gznSpU492)u+`f{9Yr)js`NmfYH#Wdtradc0TnKNz@Su!e zu$9}G_=ku;%4xk}eXl>)KgpuT>_<`Ud(A^a++K&pm3LbN;gI}ku@YVrA%FJBZ5$;m zobR8}OLtW4-i+qPPLS-(7<>M{)rhiPoi@?&vDeVq5%fmZk=mDdRV>Pb-l7pP1y6|J z8I>sF+TypKV=_^NwBU^>4JJq<*14GLfM2*XQzYdlqqjnE)gZsPW^E@mp&ww* zW9i>XL=uwLVZ9pO*8K>t>vdL~Ek_NUL$?LQi5sc#1Q-f6-ywKcIT8Kw?C(_3pbR`e|)%9S-({if|E+hR2W!&qfQ&UiF^I!|M#xhdWsenv^wpKCBiuxXbnp85`{i|;BM?Ba`lqTA zyRm=UWJl&E{8JzYDHFu>*Z10-?#A8D|5jW9Ho0*CAs0fAy~MqbwYuOq9jjt9*nuHI zbDwKvh)5Ir$r!fS5|;?Dt>V+@F*v8=TJJF)TdnC#Mk>+tGDGCw;A~^PC`gUt*<(|i 
zB{{g{`uFehu`$fm4)&k7`u{xIV)yvA(%5SxX9MS80p2EKnLtCZ>tlX>*Z6nd&6-Mv$5rHD*db;&IBK3KH&M<+ArlGXDRdX1VVO4)&R$f4NxXI>GBh zSv|h>5GDAI(4E`@F?EnW zS>#c&Gw6~_XL`qQG4bK`W*>hek4LX*efn6|_MY+rXkNyAuu?NxS%L7~9tD3cn7&p( zCtfqe6sjB&Q-Vs7BP5+%;#Gk};4xtwU!KY0XXbmkUy$kR9)!~?*v)qw00!+Yg^#H> zc#8*z6zZo>+(bud?K<*!QO4ehiTCK&PD4G&n)Tr9X_3r-we z?fI+}-G~Yn93gI6F{}Dw_SC*FLZ)5(85zp4%uubtD)J)UELLkvGk4#tw&Tussa)mTD$R2&O~{ zCI3>fr-!-b@EGRI%g0L8UU%%u_<;e9439JNV;4KSxd|78v+I+8^rmMf3f40Jb}wEszROD?xBZu>Ll3;sUIoNxDK3|j3*sam2tC@@e$ z^!;+AK>efeBJB%ALsQ{uFui)oDoq()2USi?n=6C3#eetz?wPswc={I<8x=(8lE4EIsUfyGNZ{|KYn1IR|=E==f z(;!A5(-2y^2xRFCSPqzHAZn5RCN_bp22T(KEtjA(rFZ%>a4@STrHZflxKoqe9Z4@^ zM*scx_y73?Q{vt6?~WEl?2q*;@8 z3M*&@%l)SQmXkcUm)d@GT2#JdzhfSAP9|n#C;$E8X|pwD!r#X?0P>0ZisQ~TNqupW z*lUY~+ikD`vQb?@SAWX#r*Y+;=_|oacL$2CL$^(mV}aKO77pg}O+-=T1oLBT5sL2i z42Qth2+0@C`c+*D0*5!qy26sis<9a7>LN2{z%Qj49t z=L@x`4$ALHb*3COHoT?5S_c(Hs}g!V>W^=6Q0}zaubkDn)(lTax0+!+%B}9Vqw6{H zvL|BRM`O<@;eVi1DzM!tXtBrA20Ce@^Jz|>%X-t`vi-%WweXCh_LhI#bUg2*pcP~R z*RuTUzBKLXO~~uMd&o$v3@d0shHfUjC6c539PE6rF&;Ufa(Rw@K1*m7?f5)t`MjH0 z)_V(cajV5Am>f!kWcI@5rE8t6$S>5M=k=aRZROH6fA^jJp~2NlR4;Q2>L$7F#RT#9 z>4@1RhWG`Khy>P2j1Yx^BBL{S`niMaxlSWV-JBU0-T9zZ%>7mR3l$~QV$({o0;jTI ze5=cN^!Bc2bT|BcojXp~K#2cM>OTe*cM{Kg-j*CkiW)EGQot^}s;cy8_1_@JA0Whq zlrNr+R;Efa+`6N)s5rH*|E)nYZ3uqkk2C(E7@A|3YI`ozP~9Lexx#*1(r8luq+YPk z{J}c$s` zPM35Fx(YWB3Z5IYnN+L_4|jaR(5iWJi2~l&xy}aU7kW?o-V*6Av2wyZTG!E2KSW2* zGRLQkQU;Oz##ie-Z4fI)WSRxn$(ZcD;TL+;^r=a4(G~H3ZhK$lSXZj?cvyY8%d9JM zzc3#pD^W_QnWy#rx#;c&N@sqHhrnHRmj#i;s%zLm6SE(n&BWpd&f7>XnjV}OlZntI70fq%8~9<7 zMYaw`E-rp49-oC1N_uZTo)Cu%RR2QWdHpzQIcNsoDp`3xfP+`gI?tVQZ4X={qU?(n zV>0ASES^Xuc;9JBji{)RnFL(Lez;8XbB1uWaMp@p?7xhXk6V#!6B@aP4Rz7-K%a>i z?fvf}va_DGUXlI#4--`A3qK7J?-HwnG7O~H2;zR~RLW)_^#La!=}+>KW#anZ{|^D3 B7G?kd diff --git a/docs/theme/docker/static/img/glyphicons-halflings.png b/docs/theme/docker/static/img/glyphicons-halflings.png deleted file mode 100755 index a9969993201f9cee63cf9f49217646347297b643..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12799 zcma*OWmH^Ivn@*S;K3nSf_t!#;0f+&pm7Po8`nk}2q8f5;M%x$SdAkd9FAvlc$ zx660V9e3Ox@4WZ^?7jZ%QFGU-T~%||Ug4iK6bbQY@zBuF2$hxOw9wF=A)nUSxR_5@ zEX>HBryGrjyuOFFv$Y4<+|3H@gQfEqD<)+}a~mryD|1U9*I_FOG&F%+Ww{SJ-V2BR zjt<81Ek$}Yb*95D4RS0HCps|uLyovt;P05hchQb-u2bzLtmog&f2}1VlNhxXV);S9 zM2buBg~!q9PtF)&KGRgf3#z7B(hm5WlNClaCWFs!-P!4-u*u5+=+D|ZE9e`KvhTHT zJBnLwGM%!u&vlE%1ytJ=!xt~y_YkFLQb6bS!E+s8l7PiPGSt9xrmg?LV&&SL?J~cI zS(e9TF1?SGyh+M_p@o1dyWu7o7_6p;N6hO!;4~ z2B`I;y`;$ZdtBpvK5%oQ^p4eR2L)BH>B$FQeC*t)c`L71gXHPUa|vyu`Bnz)H$ZcXGve(}XvR!+*8a>BLV;+ryG1kt0=)ytl zNJxFUN{V7P?#|Cp85QTa@(*Q3%K-R(Pkv1N8YU*(d(Y}9?PQ(j;NzWoEVWRD-~H$=f>j9~PN^BM2okI(gY-&_&BCV6RP&I$FnSEM3d=0fCxbxA6~l>54-upTrw zYgX@%m>jsSGi`0cQt6b8cX~+02IghVlNblR7eI;0ps}mpWUcxty1yG56C5rh%ep(X z?)#2d?C<4t-KLc*EAn>>M8%HvC1TyBSoPNg(4id~H8JwO#I)Bf;N*y6ai6K9_bA`4 z_g9(-R;qyH&6I$`b42v|0V3Z8IXN*p*8g$gE98+JpXNY+jXxU0zsR^W$#V=KP z3AEFp@OL}WqwOfsV<)A^UTF4&HF1vQecz?LWE@p^Z2){=KEC_3Iopx_eS42>DeiDG zWMXGbYfG~W7C8s@@m<_?#Gqk;!&)_Key@^0xJxrJahv{B&{^!>TV7TEDZlP|$=ZCz zmX=ZWtt4QZKx**)lQQoW8y-XLiOQy#T`2t}p6l*S`68ojyH@UXJ-b~@tN`WpjF z%7%Yzv807gsO!v=!(2uR)16!&U5~VPrPHtGzUU?2w(b1Xchq}(5Ed^G|SD7IG+kvgyVksU) z(0R)SW1V(>&q2nM%Z!C9=;pTg!(8pPSc%H01urXmQI6Gi^dkYCYfu6b4^tW))b^U+ z$2K&iOgN_OU7n#GC2jgiXU{caO5hZt0(>k+c^(r><#m|#J^s?zA6pi;^#*rp&;aqL zRcZi0Q4HhVX3$ybclxo4FFJW*`IV`)Bj_L3rQe?5{wLJh168Ve1jZv+f1D}f0S$N= zm4i|9cEWz&C9~ZI3q*gwWH^<6sBWuphgy@S3Qy?MJiL>gwd|E<2h9-$3;gT9V~S6r z)cAcmE0KXOwDA5eJ02-75d~f?3;n7a9d_xPBJaO;Z)#@s7gk5$Qn(Fc^w@9c5W0zY 
z59is0?Mt^@Rolcn{4%)Ioat(kxQH6}hIykSA)zht=9F_W*D#<}N(k&&;k;&gKkWIL z0Of*sP=X(Uyu$Pw;?F@?j{}=>{aSHFcii#78FC^6JGrg-)!)MV4AKz>pXnhVgTgx8 z1&5Y=>|8RGA6++FrSy=__k_imx|z-EI@foKi>tK0Hq2LetjUotCgk2QFXaej!BWYL zJc{fv(&qA7UUJ|AXLc5z*_NW#yWzKtl(c8mEW{A>5Hj^gfZ^HC9lQNQ?RowXjmuCj4!!54Us1=hY z0{@-phvC}yls!PmA~_z>Y&n&IW9FQcj}9(OLO-t^NN$c0o}YksCUWt|DV(MJB%%Sr zdf}8!9ylU2TW!=T{?)g-ojAMKc>3pW;KiZ7f0;&g)k}K^#HBhE5ot)%oxq$*$W@b# zg4p<Ou`ME|Kd1WHK@8 zzLD+0(NHWa`B{em3Ye?@aVsEi>y#0XVZfaFuq#;X5C3{*ikRx7UY4FF{ZtNHNO?A_ z#Q?hwRv~D8fPEc%B5E-ZMI&TAmikl||EERumQCRh7p;)>fdZMxvKq;ky0}7IjhJph zW*uuu*(Y6)S;Od--8uR^R#sb$cmFCnPcj9PPCWhPN;n`i1Q#Qn>ii z{WR|0>8F`vf&#E(c2NsoH=I7Cd-FV|%(7a`i}gZw4N~QFFG2WtS^H%@c?%9UZ+kez z;PwGgg_r6V>Kn5n(nZ40P4qMyrCP3bDkJp@hp6&X3>gzC>=f@Hsen<%I~7W+x@}b> z0}Et*vx_50-q@PIV=(3&Tbm}}QRo*FP2@)A#XX-8jYspIhah`9ukPBr)$8>Tmtg&R z?JBoH17?+1@Y@r>anoKPQ}F8o9?vhcG79Cjv^V6ct709VOQwg{c0Q#rBSsSmK3Q;O zBpNihl3S0_IGVE)^`#94#j~$;7+u870yWiV$@={|GrBmuz4b)*bCOPkaN0{6$MvazOEBxFdKZDlbVvv{8_*kJ zfE6C`4&Kkz<5u%dEdStd85-5UHG5IOWbo8i9azgg#zw-(P1AA049hddAB*UdG3Vn0 zX`OgM+EM|<+KhJ<=k?z~WA5waVj?T9eBdfJGebVifBKS1u<$#vl^BvSg)xsnT5Aw_ZY#}v*LXO#htB>f}x3qDdDHoFeb zAq7;0CW;XJ`d&G*9V)@H&739DpfWYzdQt+Kx_E1K#Cg1EMtFa8eQRk_JuUdHD*2;W zR~XFnl!L2A?48O;_iqCVr1oxEXvOIiN_9CUVTZs3C~P+11}ebyTRLACiJuMIG#`xP zKlC|E(S@QvN+%pBc6vPiQS8KgQAUh75C0a2xcPQDD$}*bM&z~g8+=9ltmkT$;c;s z5_=8%i0H^fEAOQbHXf0;?DN5z-5+1 zDxj50yYkz4ox9p$HbZ|H?8ukAbLE^P$@h}L%i6QVcY>)i!w=hkv2zvrduut%!8>6b zcus3bh1w~L804EZ*s96?GB&F7c5?m?|t$-tp2rKMy>F*=4;w*jW}^;8v`st&8)c; z2Ct2{)?S(Z;@_mjAEjb8x=qAQvx=}S6l9?~H?PmP`-xu;ME*B8sm|!h@BX4>u(xg_ zIHmQzp4Tgf*J}Y=8STR5_s)GKcmgV!$JKTg@LO402{{Wrg>#D4-L%vjmtJ4r?p&$F!o-BOf7ej~ z6)BuK^^g1b#(E>$s`t3i13{6-mmSp7{;QkeG5v}GAN&lM2lQT$@(aQCcFP(%UyZbF z#$HLTqGT^@F#A29b0HqiJsRJAlh8kngU`BDI6 zJUE~&!cQ*&f95Ot$#mxU5+*^$qg_DWNdfu+1irglB7yDglzH()2!@#rpu)^3S8weW z_FE$=j^GTY*|5SH95O8o8W9FluYwB=2PwtbW|JG6kcV^dMVmX(wG+Otj;E$%gfu^K z!t~<3??8=()WQSycsBKy24>NjRtuZ>zxJIED;YXaUz$@0z4rl+TW zWxmvM$%4jYIpO>j5k1t1&}1VKM~s!eLsCVQ`TTjn3JRXZD~>GM z$-IT~(Y)flNqDkC%DfbxaV9?QuWCV&-U1yzrV@0jRhE;)ZO0=r-{s@W?HOFbRHDDV zq;eLo+wOW;nI|#mNf(J?RImB9{YSO2Y`9825Lz#u4(nk3)RGv3X8B(A$TsontJ8L! z9JP^eWxtKC?G8^xAZa1HECx*rp35s!^%;&@Jyk)NexVc)@U4$^X1Dag6`WKs|(HhZ#rzO2KEw3xh~-0<;|zcs0L>OcO#YYX{SN8m6`9pp+ zQG@q$I)T?aoe#AoR@%om_#z=c@ych!bj~lV13Qi-xg$i$hXEAB#l=t7QWENGbma4L zbBf*X*4oNYZUd_;1{Ln_ZeAwQv4z?n9$eoxJeI?lU9^!AB2Y~AwOSq67dT9ADZ)s@ zCRYS7W$Zpkdx$3T>7$I%3EI2ik~m!f7&$Djpt6kZqDWZJ-G{*_eXs*B8$1R4+I}Kf zqniwCI64r;>h2Lu{0c(#Atn)%E8&)=0S4BMhq9$`vu|Ct;^ur~gL`bD>J@l)P$q_A zO7b3HGOUG`vgH{}&&AgrFy%K^>? 
z>wf**coZ2vdSDcNYSm~dZ(vk6&m6bVKmVgrx-X<>{QzA!)2*L+HLTQz$e8UcB&Djq zl)-%s$ZtUN-R!4ZiG=L0#_P=BbUyH+YPmFl_ogkkQ$=s@T1v}rNnZ^eMaqJ|quc+6 z*ygceDOrldsL30w`H;rNu+IjlS+G~p&0SawXCA1+D zC%cZtjUkLNq%FadtHE?O(yQTP486A{1x<{krq#rpauNQaeyhM3*i0%tBpQHQo-u)x z{0{&KS`>}vf2_}b160XZO2$b)cyrHq7ZSeiSbRvaxnKUH{Q`-P(nL&^fcF2){vhN- zbX&WEjP7?b4A%0y6n_=m%l00uZ+}mCYO(!x?j$+O$*TqoD_Q5EoyDJ?w?^UIa491H zE}87(bR`X;@u#3Qy~9wWdWQIg1`cXrk$x9=ccR|RY1~%{fAJ@uq@J3e872x0v$hmv ze_KcL(wM|n0EOp;t{hKoohYyDmYO;!`7^Lx;0k=PWPGZpI>V5qYlzjSL_(%|mud50 z7#{p97s`U|Sn$WYF>-i{i4`kzlrV6a<}=72q2sAT7Zh{>P%*6B;Zl;~0xWymt10Mo zl5{bmR(wJefJpNGK=fSRP|mpCI-)Nf6?Pv==FcFmpSwF1%CTOucV{yqxSyx4Zws3O z8hr5Uyd%ezIO7?PnEO0T%af#KOiXD$e?V&OX-B|ZX-YsgSs%sv-6U+sLPuz{D4bq| zpd&|o5tNCmpT>(uIbRf?8c}d3IpOb3sn6>_dr*26R#ev<_~vi)wleW$PX|5)$_ z+_|=pi(0D(AB_sjQ;sQQSM&AWqzDO1@NHw;C9cPdXRKRI#@nUW)CgFxzQ1nyd!+h& zcjU!U=&u|>@}R(9D$%lu2TlV>@I2-n@fCr5PrZNVyKWR7hm zWjoy^p7v8m#$qN0K#8jT- zq`mSirDZDa1Jxm;Rg3rAPhC)LcI4@-RvKT+@9&KsR3b0_0zuM!Fg7u>oF>3bzOxZPU&$ab$Z9@ zY)f7pKh22I7ZykL{YsdjcqeN++=0a}elQM-4;Q)(`Ep3|VFHqnXOh14`!Bus& z9w%*EWK6AiAM{s$6~SEQS;A>ey$#`7)khZvamem{P?>k)5&7Sl&&NXKk}o!%vd;-! zpo2p-_h^b$DNBO>{h4JdGB=D>fvGIYN8v&XsfxU~VaefL?q} z3ekM?iOKkCzQHkBkhg=hD!@&(L}FcHKoa zbZ7)H1C|lHjwEb@tu=n^OvdHOo7o+W`0-y3KdP#bb~wM=Vr_gyoEq|#B?$&d$tals ziIs-&7isBpvS|CjC|7C&3I0SE?~`a%g~$PI%;au^cUp@ER3?mn-|vyu!$7MV6(uvt z+CcGuM(Ku2&G0tcRCo7#D$Dirfqef2qPOE5I)oCGzmR5G!o#Q~(k~)c=LpIfrhHQk zeAva6MilEifE7rgP1M7AyWmLOXK}i8?=z2;N=no)`IGm#y%aGE>-FN zyXCp0Sln{IsfOBuCdE*#@CQof%jzuU*jkR*Su3?5t}F(#g0BD0Zzu|1MDes8U7f9; z$JBg|mqTXt`muZ8=Z`3wx$uizZG_7>GI7tcfOHW`C2bKxNOR)XAwRkLOaHS4xwlH4 zDpU29#6wLXI;H?0Se`SRa&I_QmI{zo7p%uveBZ0KZKd9H6@U?YGArbfm)D*^5=&Rp z`k{35?Z5GbZnv>z@NmJ%+sx=1WanWg)8r}C_>EGR8mk(NR$pW<-l8OTU^_u3M@gwS z7}GGa1)`z5G|DZirw;FB@VhH7Dq*0qc=|9lLe{w2#`g+_nt>_%o<~9(VZe=zI*SSz4w43-_o>4E4`M@NPKTWZuQJs)?KXbWp1M zimd5F;?AP(LWcaI-^Sl{`~>tmxsQB9Y$Xi*{Zr#py_+I$vx7@NY`S?HFfS!hUiz$a z{>!&e1(16T!Om)m)&k1W#*d#GslD^4!TwiF2WjFBvi=Ms!ADT)ArEW6zfVuIXcXVk z>AHjPADW+mJzY`_Ieq(s?jbk4iD2Rb8*V3t6?I+E06(K8H!!xnDzO%GB;Z$N-{M|B zeT`jo%9)s%op*XZKDd6*)-^lWO{#RaIGFdBH+;XXjI(8RxpBc~azG1H^2v7c^bkFE zZCVPE+E*Q=FSe8Vm&6|^3ki{9~qafiMAf7i4APZg>b%&5>nT@pHH z%O*pOv(77?ZiT{W zBibx}Q12tRc7Py1NcZTp`Q4ey%T_nj@1WKg5Fz_Rjl4wlJQj)rtp8yL3r!Shy zvZvnmh!tH4T6Js-?vI0<-rzzl{mgT*S0d_7^AU_8gBg^03o-J=p(1o6kww2hx|!%T z-jqp}m^G*W?$!R#M%Ef?&2jYxmx+lXWZszpI4d$pUN`(S)|*c^CgdwY>Fa>> zgGBJhwe8y#Xd*q0=@SLEgPF>+Qe4?%E*v{a`||luZ~&dqMBrRfJ{SDMaJ!s_;cSJp zSqZHXIdc@@XteNySUZs^9SG7xK`8=NBNM)fRVOjw)D^)w%L2OPkTQ$Tel-J)GD3=YXy+F4in(ILy*A3m@3o73uv?JC}Q>f zrY&8SWmesiba0|3X-jmlMT3 z*ST|_U@O=i*sM_*48G)dgXqlwoFp5G6qSM3&%_f_*n!PiT>?cNI)fAUkA{qWnqdMi+aNK_yVQ&lx4UZknAc9FIzVk% zo6JmFH~c{_tK!gt4+o2>)zoP{sR}!!vfRjI=13!z5}ijMFQ4a4?QIg-BE4T6!#%?d&L;`j5=a`4is>U;%@Rd~ zXC~H7eGQhhYWhMPWf9znDbYIgwud(6$W3e>$W4$~d%qoJ z+JE`1g$qJ%>b|z*xCKenmpV$0pM=Gl-Y*LT8K+P)2X#;XYEFF4mRbc~jj?DM@(1e`nL=F4Syv)TKIePQUz)bZ?Bi3@G@HO$Aps1DvDGkYF50O$_welu^cL7;vPiMGho74$;4fDqKbE{U zd1h{;LfM#Fb|Z&uH~Rm_J)R~Vy4b;1?tW_A)Iz#S_=F|~pISaVkCnQ0&u%Yz%o#|! zS-TSg87LUfFSs{tTuM3$!06ZzH&MFtG)X-l7>3)V?Txuj2HyG*5u;EY2_5vU0ujA? 
zHXh5G%6e3y7v?AjhyX79pnRBVr}RmPmtrxoB7lkxEzChX^(vKd+sLh?SBic=Q)5nA zdz7Mw3_iA>;T^_Kl~?1|5t%GZ;ki_+i>Q~Q1EVdKZ)$Sh3LM@ea&D~{2HOG++7*wF zAC6jW4>fa~!Vp5+$Z{<)Qxb|{unMgCv2)@%3j=7)Zc%U<^i|SAF88s!A^+Xs!OASYT%7;Jx?olg_6NFP1475N z#0s<@E~FI}#LNQ{?B1;t+N$2k*`K$Hxb%#8tRQi*Z#No0J}Pl;HWb){l7{A8(pu#@ zfE-OTvEreoz1+p`9sUI%Y{e5L-oTP_^NkgpYhZjp&ykinnW;(fu1;ttpSsgYM8ABX4dHe_HxU+%M(D=~) zYM}XUJ5guZ;=_ZcOsC`_{CiU$zN3$+x&5C`vX-V3`8&RjlBs^rf00MNYZW+jCd~7N z%{jJuUUwY(M`8$`B>K&_48!Li682ZaRknMgQ3~dnlp8C?__!P2z@=Auv;T^$yrsNy zCARmaA@^Yo2sS%2$`031-+h9KMZsIHfB>s@}>Y(z988e!`%4=EDoAQ0kbk>+lCoK60Mx9P!~I zlq~wf7kcm_NFImt3ZYlE(b3O1K^QWiFb$V^a2Jlwvm(!XYx<`i@ZMS3UwFt{;x+-v zhx{m=m;4dgvkKp5{*lfSN3o^keSpp9{hlXj%=}e_7Ou{Yiw(J@NXuh*;pL6@$HsfB zh?v+r^cp@jQ4EspC#RqpwPY(}_SS$wZ{S959`C25777&sgtNh%XTCo9VHJC-G z;;wi9{-iv+ETiY;K9qvlEc04f;ZnUP>cUL_T*ms``EtGoP^B#Q>n2dSrbAg8a>*Lg zd0EJ^=tdW~7fbcLFsqryFEcy*-8!?;n%;F+8i{eZyCDaiYxghr z$8k>L|2&-!lhvuVdk!r-kpSFl`5F5d4DJr%M4-qOy3gdmQbqF1=aBtRM7)c_Ae?$b8 zQg4c8*KQ{XJmL)1c7#0Yn0#PTMEs4-IHPjkn0!=;JdhMXqzMLeh`yOylXROP- zl#z3+fwM9l3%VN(6R77ua*uI9%hO7l7{+Hcbr(peh;afUK?B4EC09J{-u{mv)+u#? zdKVBCPt`eU@IzL)OXA`Ebu`Xp?u0m%h&X41}FNfnJ*g1!1wcbbpo%F4x!-#R9ft!8{5`Ho}04?FI#Kg zL|k`tF1t_`ywdy8(wnTut>HND(qNnq%Sq=AvvZbXnLx|mJhi!*&lwG2g|edBdVgLy zjvVTKHAx(+&P;P#2Xobo7_RttUi)Nllc}}hX>|N?-u5g7VJ-NNdwYcaOG?NK=5)}` zMtOL;o|i0mSKm(UI_7BL_^6HnVOTkuPI6y@ZLR(H?c1cr-_ouSLp{5!bx^DiKd*Yb z{K78Ci&Twup zTKm)ioN|wcYy%Qnwb)IzbH>W!;Ah5Zdm_jRY`+VRJ2 zhkspZ9hbK3iQD91A$d!0*-1i#%x81|s+SPRmD}d~<1p6!A13(!vABP2kNgqEG z?AMgl^P+iRoIY(9@_I?n1829lGvAsRnHwS~|5vD2+Zi53j<5N4wNn0{q>>jF9*bI) zL$kMXM-awNOElF>{?Jr^tOz1glbwaD-M0OKOlTeW3C!1ZyxRbB>8JDof(O&R1bh%3x#>y2~<>OXO#IIedH0Q`(&&?eo-c~ z>*Ah#3~09unym~UC-UFqqI>{dmUD$Y4@evG#ORLI*{ZM)Jl=e1it!XzY($S3V zLG!Y6fCjE>x6r@5FG1n|8ompSZaJ>9)q6jqU;XxCQk9zV(?C9+i*>w z21+KYt1gXX&0`x3E)hS7I5}snbBzox9C@Xzcr|{B8Hw;SY1$}&BoYKXH^hpjW-RgJ z-Fb}tannKCv>y~^`r|(1Q9;+sZlYf3XPSX|^gR01UFtu$B*R;$sPZdIZShRr>|b@J z;#G{EdoY+O;REEjQ}X7_YzWLO+Ey3>a_KDe1CjSe| z6arqcEZ)CX!8r(si`dqbF$uu&pnf^Np{1f*TdJ`r2;@SaZ z#hb4xlaCA@Pwqj#LlUEe5L{I$k(Zj$d3(~)u(F%&xb8={N9hKxlZIO1ABsM{Mt|)2 zJ^t9Id;?%4PfR4&Ph9B9cFK~@tG3wlFW-0fXZS_L4U*EiAA%+`h%q2^6BCC;t0iO4V=s4Qug{M|iDV@s zC7|ef-dxiR7T&Mpre!%hiUhHM%3Qxi$Lzw6&(Tvlx9QA_7LhYq<(o~=Y>3ka-zrQa zhGpfFK@)#)rtfz61w35^sN1=IFw&Oc!Nah+8@qhJ0UEGr;JplaxOGI82OVqZHsqfX ze1}r{jy;G?&}Da}a7>SCDsFDuzuseeCKof|Dz2BPsP8? 
zY;a)Tkr2P~0^2BeO?wnzF_Ul-ekY=-w26VnU%U3f19Z-pj&2 z4J_a|o4Dci+MO)mPQIM>kdPG1xydiR9@#8m zh27D7GF{p|a{8({Q-Pr-;#jV{2zHR>lGoFtIfIpoMo?exuQyX_A;;l0AP4!)JEM$EwMInZkj+8*IHP4vKRd zKx_l-i*>A*C@{u%ct`y~s6MWAfO{@FPIX&sg8H{GMDc{4M3%$@c8&RAlw0-R<4DO3 trJqdc$mBpWeznn?E0M$F`|3v=`3%T2A17h;rxP7$%JLd=6(2u;`(N3pt&so# diff --git a/docs/theme/docker/static/img/hiring_graphic.png b/docs/theme/docker/static/img/hiring_graphic.png deleted file mode 100644 index b3a6b996440f5366e5bde53f392b0759e87abe17..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6954 zcmd^E^;cBw+NKmmKtMrC7#hhLau}o==?;kj1{i4=x>H&KfuSTMBm@MJP=ujl=tz!x3t31497JU{-K@5&FF!4fFuGg$TVazcNtSQ3_@am-ljp zX?m$>nS0ro3t7;Mi2`1FK<*6K!4Ob@hn=mx3&cZ&{vUQBcjrH9ZhF8!AP5@~`hNzc zr>q8$a&U$L__=_b=0E`uK!A@6#0M1O6XF2y06{=*5Dzy{kP`%i@C!nKAi!UX{?41T zg(XBoTJ|qrcajLbH3H!X;pTRCcjt2F<#KSg;syx`32_5?xOsRu?+~0Wp7sc+2dBLY z!`~L9VJ_y*a7P5(!5;9(BGk;m6(K@@7wJD!uya&a{+D5Um%j>iS2Au7s3SLs3&?F} z_b0D^pj{9euz!c~zoK2VJRM=&8ZZ|JS7-CPepoX6O}?w$e|Pi;coz*s+QHn_4rY%~ zkQSl8({Ne9Eg({0X(5n=w1A`_m?}C8Zo5TL* z!vD?X|F2w#lrs#9aB$XgaIpQmdurAW2nQEy2S z;l6Xu{il)t(b&ILchmCc_^&a&6aF>mF#EgdcD@^9*!(2f-P3`grlcj)rLMj5wX#)4 zu^GsJeR{h6qkiL8<1Yr*At%?F)btjx$W&ay@0**8on18Cb`S=i&dFW=QI9sW>{L)W z-Pl0u8%@T%?-Y@2k&qp-a~g8S!1o#mLqrAv@yf+r+_h(B0EWPSYWyIP4oR zosre5q=uqr-K?w{fLit#K^rLnQ;{)k^2#V0#_7W1PHla(og<2w<79fKm4oN{{Cqkz zV$#cRe`IvX2)aKoxQuGv86G_upBRpaKAf33n3$N4h*^kF-0A6S6L>k9n2gpo=y7sI zu>lVj7B-7ZuMZDzE-zcS_|CR>rhEgM_#vG}&{iJs<>6t=3$8XDgM;Sgt*V;emzTG{ ze=p{JKAD{#4h8HIUrT?bq;xqWtDb_ULr8S-OWCBGSC6vBMsMHl#KcBt zXNRO*hn!NIn$|{3TN^}tAt!e}E2o7GxYOCS+1|0!(sr@G-zF-(kcdQa@$K{vZlIb* zl2Wer_dEC@4ZPr0bkD`+R@W=les!HbSC7-B<&~n6gR${n9H15!&Q4X$ac9@|*U)}t z_4(MiUT^<{-v0T#g65YJ=Xdclv2_?2?+guhY3lBDq4%0v(6Wj5UrmBQlr zqLSm0(X;*iCLVB)hVFD;!Rgl4>Du~k1L~l?vxT0u*Tnp4Z-2e1WoK;c_tn+!^YbQ% z*uv+cqt(?;-ParWg}u73HwOn-a`Ji&Oim`J+Cc*CW|r4yXV*u^^(5r|q2cv}q&a%MGOo~41|V?=AHQed0}Xt;WA-h zJV{cJmeBH;L>u~wYr>{7m7w?*O)W8nYmOGdOBU%)Ki^W1JINuRv~myL=W>m-5moTp z`tVU8oy#UZ|8c^gy4fH;e^ro%1aDV6{gdu44-AxC_j>c%#!dK`vFn}?m1Ypbh0~ zoD{XO`Rh5cCrY#J*vw=fI<>uRTD_HfP8S3HDUnN8)wb@1HWoZ_LIt^!J~t=7cg3ty zR#m86n=vr))hjdR&76k=&h}j0O@b(hMb*nEvs(4ta&4>Xz52^sygdR&tad4>)nyc+ zF@ZJ}PLEd0(#E3RUgDq|Ukr=}g=(*NZX(*5=&CxErs*0d{>cnoknwLXSH>JkEx6sQjab|+FVhir%isNrey1ey`E!DC{G@jm0qqi{Vbk4bk62s1qfx$ zA}Ul-_>bO+mQzYbBb#LJ;q2gv>Wy`pb?f!0RWzv)5)*AdJ$sI<{3K-$#w_A7ba{H; zEr1o~Xk8#28PfQ9U?T5+#AHhCinNb0EvKfs$G6DbZALD^P1ksIrks1{QI&YN<%|Bv zh^_ml+SYFgMXzhyh*3>}VL>NzWdz&uZ<58RfIb&>?6bidNPyjm4c7^2U-SbLZ;&i$ z8iNbuOJgV1nTp)UhKiR8yMFH+1SRx_bX#H0c1~BNr0;yok8zoln14 zt*#l^RTT_41*O9)e&#bin3Bj$ZLFfqJ(?xjERDC9(_&z+8Y#}!v&!`1 zx6paa!p8S{R1Rn5VALm^$#S}fwRmk&qq`;i6JfP8EVPD|T9R5yKIEp6O$D?8>ffKV z$2f)x9n~w)x>=AgUvY>glczTquT*YplRWGvlm37m=77Bva)0ehwYm0Fye-#fCcbLG zo5zVc}vd3}R06RHRus!zpqaVEZ z&HLKG6q%e;O$~g_MzsWsv2m+iDs+fK-iC4jM{qhGLCLGw+4CQBE9|K54Y6kFR&Nc# z{)EoY`nvob?jWfeU9}#i3We~Vu!Ct*ls`LO{hT!=84WvYiD*U`CO(#-keJbt|Ii|T z3JGh_$K#<7EI1`uEOT&9OJ(HVrE}y&XR&g#eZqS zP_m;v0v4%oOw z73GCysXJ7{ff_}uG=8mnv+ttT#lY5yO?pvJN;B9iGASl^WrB&nkDCw0>q>nv&>^6n zD6{Ki53dnXjLlBp+z<38{P@z+ z8%=Y$_cY5LFVy0M{rR~2H9XR%mp96dQOLGyzkVxhj!mFB@D~+jInEQs_XdyDG*{f2a}8Nzi46GU?&YttCI!$g&b@6Hrz+ij#l(4!j4jnv_#6k5KPwBr zEL#(pFiRx~y21PWYN+lgMYTBR*YmlEwRev&*=($0Ji#h_r{j-zm2ZAv4jnGf=uEw>Gj*&Kv9ggT z_n@v&DHcd$jGM!a6YmiK*xsfK5v+DK((Jt?*TGS)_%MUWw*&mD{B+@_Yc;QiA3uasZFk#i z_PL>5{Zm9NZ&r(=kQa{T@<^MZdK69*$upAL4sRkEaYm(_izi3}8mpy!M($7vfs$xI z2*uUx&LRu#%y;n@DKo91F>%Fo55o2@BZH3z)Y7!xd~|gHCZB7E8TJ?K#7S&!);R?^y-C~ZD+|5>t7P3N8wyM`(+5zysYoCs`h5W*tV?{DVW0Yux@ 
[GIT binary patch data omitted: deleted docs-theme image assets, including docs/theme/docker/static/img/menu_arrow_down.gif]
[A truncated fragment of the deleted docs-theme sidebar/version-flyer JavaScript follows:]
  • Local
  • '); - } - - if (doc_version == "latest") { - $('.version-flyer .version-note').hide(); - } - - // mark the active documentation in the version widget - $(".version-flyer a:contains('" + doc_version + "')").parent().addClass('active-slug').setAttribute("title", "Current version"); - - - -}); \ No newline at end of file diff --git a/docs/theme/docker/theme.conf b/docs/theme/docker/theme.conf deleted file mode 100755 index 5843e97d70..0000000000 --- a/docs/theme/docker/theme.conf +++ /dev/null @@ -1,11 +0,0 @@ -[theme] -inherit = basic -pygments_style = monokai - -[options] -full_logo = true -textcolor = #444444 -headingcolor = #0c3762 -linkcolor = #8C7B65 -visitedlinkcolor = #AFA599 -hoverlinkcolor = #4e4334 From f87a97f7df838742a602f1984f4552b803e3f92d Mon Sep 17 00:00:00 2001 From: "O.S.Tezer" Date: Thu, 1 May 2014 17:13:34 +0300 Subject: [PATCH 134/219] Improve code/comment/output markings & display consistency This PR aims to increase the consistency across the docs for code blocks and code/comment/output markings. Rule followed here is "what's visible on the screen should be reflected" Issue: - Docs had various code blocks showing: comments, commands & outputs. - All three of these items were inconsistently marked. Some examples as to how this PR aims to introduce improvements: 1. Removed `> ` from in front of the "outputs". Eg, ` > REPOSITORY TAG ID CREATED` replaced with: ` REPOSITORY TAG ID CREATED`. 2. Introduced `$` for commands. Eg, ` sudo chkconfig docker on` replaced with: ` $ sudo chkconfig docker on` 3. Comments: ` > # ` replaced with: ` # `. > Please note: > Due to a vast amount of items reviewed and changed for this PR, there > might be some individually incorrect replacements OR patterns of incorrect > replacements. This PR needs to be reviewed and if there is anything missing, > it should be improved or amended. Closes: https://github.com/dotcloud/docker/issues/5286 Docker-DCO-1.1-Signed-off-by: O.S. 
Tezer (github: ostezer) --- docs/sources/articles/runmetrics.md | 23 +++-- docs/sources/contributing/devenvironment.md | 23 +++-- .../examples/cfengine_process_management.md | 2 +- docs/sources/examples/couchdb_data_volumes.md | 16 ++-- docs/sources/examples/hello_world.md | 12 +-- docs/sources/examples/mongodb.md | 12 +-- docs/sources/examples/nodejs_web_app.md | 48 +++++------ docs/sources/examples/postgresql_service.md | 8 +- .../sources/examples/running_redis_service.md | 24 +++--- docs/sources/examples/running_riak_service.md | 4 +- docs/sources/examples/using_supervisord.md | 4 +- docs/sources/installation/archlinux.md | 4 +- docs/sources/installation/binaries.md | 12 +-- docs/sources/installation/cruxlinux.md | 22 ++--- docs/sources/installation/fedora.md | 16 ++-- docs/sources/installation/frugalware.md | 4 +- docs/sources/installation/gentoolinux.md | 10 +-- docs/sources/installation/google.md | 10 +-- docs/sources/installation/mac.md | 44 +++++----- docs/sources/installation/openSUSE.md | 12 +-- docs/sources/installation/rackspace.md | 10 +-- docs/sources/installation/rhel.md | 10 +-- docs/sources/installation/ubuntulinux.md | 86 +++++++++---------- docs/sources/installation/windows.md | 2 +- .../introduction/working-with-docker.md | 36 ++++---- .../api/archive/docker_remote_api_v1.4.md | 2 +- .../api/archive/docker_remote_api_v1.5.md | 2 +- .../api/archive/docker_remote_api_v1.6.md | 2 +- .../api/archive/docker_remote_api_v1.7.md | 2 +- .../api/archive/docker_remote_api_v1.8.md | 2 +- .../reference/api/docker_remote_api_v1.10.md | 2 +- .../reference/api/docker_remote_api_v1.11.md | 2 +- .../reference/api/docker_remote_api_v1.9.md | 2 +- .../reference/api/registry_index_spec.md | 2 +- docs/sources/reference/builder.md | 4 +- docs/sources/reference/commandline/cli.md | 16 ++-- docs/sources/reference/run.md | 14 +-- .../sources/use/ambassador_pattern_linking.md | 2 +- docs/sources/use/basics.md | 52 +++++------ docs/sources/use/chef.md | 4 +- docs/sources/use/networking.md | 2 +- docs/sources/use/port_redirection.md | 32 +++---- docs/sources/use/puppet.md | 6 +- docs/sources/use/working_with_volumes.md | 6 +- docs/sources/use/workingwithrepository.md | 6 +- 45 files changed, 307 insertions(+), 309 deletions(-) diff --git a/docs/sources/articles/runmetrics.md b/docs/sources/articles/runmetrics.md index 4cc210bb52..50d46047c0 100644 --- a/docs/sources/articles/runmetrics.md +++ b/docs/sources/articles/runmetrics.md @@ -26,7 +26,7 @@ corresponding to existing containers. To figure out where your control groups are mounted, you can run: - grep cgroup /proc/mounts + $ grep cgroup /proc/mounts ## Enumerating Cgroups @@ -287,7 +287,7 @@ an interface) can do some serious accounting. For instance, you can setup a rule to account for the outbound HTTP traffic on a web server: - iptables -I OUTPUT -p tcp --sport 80 + $ iptables -I OUTPUT -p tcp --sport 80 There is no `-j` or `-g` flag, so the rule will just count matched packets and go to the following @@ -295,7 +295,7 @@ rule. Later, you can check the values of the counters, with: - iptables -nxvL OUTPUT + $ iptables -nxvL OUTPUT Technically, `-n` is not required, but it will prevent iptables from doing DNS reverse lookups, which are probably @@ -337,11 +337,11 @@ though. The exact format of the command is: - ip netns exec + $ ip netns exec For example: - ip netns exec mycontainer netstat -i + $ ip netns exec mycontainer netstat -i `ip netns` finds the "mycontainer" container by using namespaces pseudo-files. 
Each process belongs to one network @@ -369,14 +369,13 @@ measure network usage. From there, you can examine the pseudo-file named control group (i.e. in the container). Pick any one of them. Putting everything together, if the "short ID" of a container is held in -the environment variable `$CID`, then you can do -this: +the environment variable `$CID`, then you can do this: - TASKS=/sys/fs/cgroup/devices/$CID*/tasks - PID=$(head -n 1 $TASKS) - mkdir -p /var/run/netns - ln -sf /proc/$PID/ns/net /var/run/netns/$CID - ip netns exec $CID netstat -i + $ TASKS=/sys/fs/cgroup/devices/$CID*/tasks + $ PID=$(head -n 1 $TASKS) + $ mkdir -p /var/run/netns + $ ln -sf /proc/$PID/ns/net /var/run/netns/$CID + $ ip netns exec $CID netstat -i ## Tips for high-performance metric collection diff --git a/docs/sources/contributing/devenvironment.md b/docs/sources/contributing/devenvironment.md index f7c66274e8..bcefa00369 100644 --- a/docs/sources/contributing/devenvironment.md +++ b/docs/sources/contributing/devenvironment.md @@ -32,8 +32,8 @@ Again, you can do it in other ways but you need to do more work. ## Check out the Source - git clone http://git@github.com/dotcloud/docker - cd docker + $ git clone http://git@github.com/dotcloud/docker + $ cd docker To checkout a different revision just use `git checkout` with the name of branch or revision number. @@ -45,7 +45,7 @@ Dockerfile in the current directory. Essentially, it will install all the build and runtime dependencies necessary to build and test Docker. This command will take some time to complete when you first execute it. - sudo make build + $ sudo make build If the build is successful, congratulations! You have produced a clean build of docker, neatly encapsulated in a standard build environment. @@ -54,7 +54,7 @@ build of docker, neatly encapsulated in a standard build environment. To create the Docker binary, run this command: - sudo make binary + $ sudo make binary This will create the Docker binary in `./bundles/-dev/binary/` @@ -65,7 +65,7 @@ The binary is available outside the container in the directory host docker executable with this binary for live testing - for example, on ubuntu: - sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/-dev/binary/docker--dev $(which docker);sudo service docker start + $ sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/-dev/binary/docker--dev $(which docker);sudo service docker start > **Note**: > Its safer to run the tests below before swapping your hosts docker binary. @@ -74,7 +74,7 @@ on ubuntu: To execute the test cases, run this command: - sudo make test + $ sudo make test If the test are successful then the tail of the output should look something like this @@ -105,11 +105,10 @@ something like this PASS ok github.com/dotcloud/docker/utils 0.017s -If $TESTFLAGS is set in the environment, it is passed as extra -arguments to `go test`. You can use this to select certain tests to run, -eg. +If $TESTFLAGS is set in the environment, it is passed as extra arguments +to `go test`. You can use this to select certain tests to run, e.g. - TESTFLAGS=`-run \^TestBuild\$` make test + $ TESTFLAGS=`-run \^TestBuild\$` make test If the output indicates "FAIL" and you see errors like this: @@ -124,7 +123,7 @@ is recommended. 
You can run an interactive session in the newly built container: - sudo make shell + $ sudo make shell # type 'exit' or Ctrl-D to exit @@ -134,7 +133,7 @@ If you want to read the documentation from a local website, or are making changes to it, you can build the documentation and then serve it by: - sudo make docs + $ sudo make docs # when its done, you can point your browser to http://yourdockerhost:8000 # type Ctrl-C to exit diff --git a/docs/sources/examples/cfengine_process_management.md b/docs/sources/examples/cfengine_process_management.md index 965ad252d2..0c7b6a8a1f 100644 --- a/docs/sources/examples/cfengine_process_management.md +++ b/docs/sources/examples/cfengine_process_management.md @@ -95,7 +95,7 @@ your container with the docker build command, e.g. Start the container with `apache2` and `sshd` running and managed, forwarding a port to our SSH instance: - docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start" + $ docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start" We now clearly see one of the benefits of the cfe-docker integration: it allows to start several processes as part of a normal `docker run` command. diff --git a/docs/sources/examples/couchdb_data_volumes.md b/docs/sources/examples/couchdb_data_volumes.md index 10abe7af02..17490487aa 100644 --- a/docs/sources/examples/couchdb_data_volumes.md +++ b/docs/sources/examples/couchdb_data_volumes.md @@ -20,28 +20,28 @@ different versions of CouchDB on the same data, etc. Note that we're marking `/var/lib/couchdb` as a data volume. - COUCH1=$(sudo docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03) + $ COUCH1=$(sudo docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03) ## Add data to the first database We're assuming your Docker host is reachable at `localhost`. If not, replace `localhost` with the public IP of your Docker host. - HOST=localhost - URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -Po '\d+$')/_utils/" - echo "Navigate to $URL in your browser, and use the couch interface to add data" + $ HOST=localhost + $ URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -Po '\d+$')/_utils/" + $ echo "Navigate to $URL in your browser, and use the couch interface to add data" ## Create second database This time, we're requesting shared access to `$COUCH1`'s volumes. - COUCH2=$(sudo docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03) + $ COUCH2=$(sudo docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03) ## Browse data on the second database - HOST=localhost - URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -Po '\d+$')/_utils/" - echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!' + $ HOST=localhost + $ URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -Po '\d+$')/_utils/" + $ echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!' Congratulations, you are now running two Couchdb containers, completely isolated from each other *except* for their data. diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md index ba573527c1..48f4a43102 100644 --- a/docs/sources/examples/hello_world.md +++ b/docs/sources/examples/hello_world.md @@ -80,7 +80,7 @@ continue to do this until we stop it. 
**Steps:** - CONTAINER_ID=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done") + $ CONTAINER_ID=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done") We are going to run a simple hello world daemon in a new container made from the `ubuntu` image. @@ -98,7 +98,7 @@ from the `ubuntu` image. - sudo docker logs $container_id + $ sudo docker logs $container_id Check the logs make sure it is working correctly. @@ -107,7 +107,7 @@ Check the logs make sure it is working correctly. - sudo docker attach --sig-proxy=false $container_id + $ sudo docker attach --sig-proxy=false $container_id Attach to the container to see the results in real-time. @@ -120,7 +120,7 @@ Attach to the container to see the results in real-time. Exit from the container attachment by pressing Control-C. - sudo docker ps + $ sudo docker ps Check the process list to make sure it is running. @@ -128,7 +128,7 @@ Check the process list to make sure it is running. - sudo docker stop $container_id + $ sudo docker stop $container_id Stop the container, since we don't need it anymore. @@ -137,7 +137,7 @@ Stop the container, since we don't need it anymore. - sudo docker ps + $ sudo docker ps Make sure it is really stopped. diff --git a/docs/sources/examples/mongodb.md b/docs/sources/examples/mongodb.md index 36a5a58ad8..4b5f95d023 100644 --- a/docs/sources/examples/mongodb.md +++ b/docs/sources/examples/mongodb.md @@ -21,7 +21,7 @@ apt source and installs the database software on Ubuntu. Create an empty file called Dockerfile: - touch Dockerfile + $ touch Dockerfile Next, define the parent image you want to use to build your own image on top of. Here, we'll use [Ubuntu](https://index.docker.io/_/ubuntu/) @@ -69,21 +69,21 @@ container. Now, lets build the image which will go through the Dockerfile we made and run all of the commands. - sudo docker build -t /mongodb . + $ sudo docker build -t /mongodb . Now you should be able to run `mongod` as a daemon and be able to connect on the local port! # Regular style - MONGO_ID=$(sudo docker run -d /mongodb) + $ MONGO_ID=$(sudo docker run -d /mongodb) # Lean and mean - MONGO_ID=$(sudo docker run -d /mongodb --noprealloc --smallfiles) + $ MONGO_ID=$(sudo docker run -d /mongodb --noprealloc --smallfiles) # Check the logs out - sudo docker logs $MONGO_ID + $ sudo docker logs $MONGO_ID # Connect and play around - mongo --port + $ mongo --port Sweet! diff --git a/docs/sources/examples/nodejs_web_app.md b/docs/sources/examples/nodejs_web_app.md index f7d63dadcf..bc0e908d2d 100644 --- a/docs/sources/examples/nodejs_web_app.md +++ b/docs/sources/examples/nodejs_web_app.md @@ -134,16 +134,16 @@ Go to the directory that has your `Dockerfile` and run the following command to build a Docker image. The `-t` flag let's you tag your image so it's easier to find later using the `docker images` command: - sudo docker build -t /centos-node-hello . + $ sudo docker build -t /centos-node-hello . Your image will now be listed by Docker: - sudo docker images + $ sudo docker images - > # Example - > REPOSITORY TAG ID CREATED - > centos 6.4 539c0211cd76 8 weeks ago - > gasi/centos-node-hello latest d64d3505b0d2 2 hours ago + # Example + REPOSITORY TAG ID CREATED + centos 6.4 539c0211cd76 8 weeks ago + gasi/centos-node-hello latest d64d3505b0d2 2 hours ago ## Run the image @@ -151,44 +151,44 @@ Running your image with `-d` runs the container in detached mode, leaving the container running in the background. 
The `-p` flag redirects a public port to a private port in the container. Run the image you previously built: - sudo docker run -p 49160:8080 -d /centos-node-hello + $ sudo docker run -p 49160:8080 -d /centos-node-hello Print the output of your app: # Get container ID - sudo docker ps + $ sudo docker ps # Print app output - sudo docker logs + $ sudo docker logs - > # Example - > Running on http://localhost:8080 + # Example + Running on http://localhost:8080 ## Test To test your app, get the the port of your app that Docker mapped: - sudo docker ps + $ sudo docker ps - > # Example - > ID IMAGE COMMAND ... PORTS - > ecce33b30ebf gasi/centos-node-hello:latest node /src/index.js 49160->8080 + # Example + ID IMAGE COMMAND ... PORTS + ecce33b30ebf gasi/centos-node-hello:latest node /src/index.js 49160->8080 In the example above, Docker mapped the `8080` port of the container to `49160`. Now you can call your app using `curl` (install if needed via: `sudo apt-get install curl`): - curl -i localhost:49160 + $ curl -i localhost:49160 - > HTTP/1.1 200 OK - > X-Powered-By: Express - > Content-Type: text/html; charset=utf-8 - > Content-Length: 12 - > Date: Sun, 02 Jun 2013 03:53:22 GMT - > Connection: keep-alive - > - > Hello World + HTTP/1.1 200 OK + X-Powered-By: Express + Content-Type: text/html; charset=utf-8 + Content-Length: 12 + Date: Sun, 02 Jun 2013 03:53:22 GMT + Connection: keep-alive + + Hello World We hope this tutorial helped you get up and running with Node.js and CentOS on Docker. You can get the full source code at diff --git a/docs/sources/examples/postgresql_service.md b/docs/sources/examples/postgresql_service.md index 1a10cd4415..14d9e647a3 100644 --- a/docs/sources/examples/postgresql_service.md +++ b/docs/sources/examples/postgresql_service.md @@ -125,14 +125,14 @@ prompt, you can create a table and populate it. psql (9.3.1) Type "help" for help. - docker=# CREATE TABLE cities ( + $ docker=# CREATE TABLE cities ( docker(# name varchar(80), docker(# location point docker(# ); CREATE TABLE - docker=# INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)'); + $ docker=# INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)'); INSERT 0 1 - docker=# select * from cities; + $ docker=# select * from cities; name | location ---------------+----------- San Francisco | (-194,53) @@ -143,7 +143,7 @@ prompt, you can create a table and populate it. You can use the defined volumes to inspect the PostgreSQL log files and to backup your configuration and data: - docker run -rm --volumes-from pg_test -t -i busybox sh + $ docker run -rm --volumes-from pg_test -t -i busybox sh / # ls bin etc lib linuxrc mnt proc run sys usr diff --git a/docs/sources/examples/running_redis_service.md b/docs/sources/examples/running_redis_service.md index 2bfa8a05bc..ca67048625 100644 --- a/docs/sources/examples/running_redis_service.md +++ b/docs/sources/examples/running_redis_service.md @@ -29,7 +29,7 @@ image. Next we build an image from our `Dockerfile`. Replace `` with your own user name. - sudo docker build -t /redis . + $ sudo docker build -t /redis . ## Run the service @@ -42,7 +42,7 @@ Importantly, we're not exposing any ports on our container. Instead we're going to use a container link to provide access to our Redis database. - sudo docker run --name redis -d /redis + $ sudo docker run --name redis -d /redis ## Create your web application container @@ -52,19 +52,19 @@ created with an alias of `db`. 
This will create a secure tunnel to the `redis` container and expose the Redis instance running inside that container to only this container. - sudo docker run --link redis:db -i -t ubuntu:12.10 /bin/bash + $ sudo docker run --link redis:db -i -t ubuntu:12.10 /bin/bash Once inside our freshly created container we need to install Redis to get the `redis-cli` binary to test our connection. - apt-get update - apt-get -y install redis-server - service redis-server stop + $ apt-get update + $ apt-get -y install redis-server + $ service redis-server stop As we've used the `--link redis:db` option, Docker has created some environment variables in our web application container. - env | grep DB_ + $ env | grep DB_ # Should return something similar to this with your values DB_NAME=/violet_wolf/db @@ -79,13 +79,13 @@ with `DB`. The `DB` comes from the link alias specified when we launched the container. Let's use the `DB_PORT_6379_TCP_ADDR` variable to connect to our Redis container. - redis-cli -h $DB_PORT_6379_TCP_ADDR - redis 172.17.0.33:6379> - redis 172.17.0.33:6379> set docker awesome + $ redis-cli -h $DB_PORT_6379_TCP_ADDR + $ redis 172.17.0.33:6379> + $ redis 172.17.0.33:6379> set docker awesome OK - redis 172.17.0.33:6379> get docker + $ redis 172.17.0.33:6379> get docker "awesome" - redis 172.17.0.33:6379> exit + $ redis 172.17.0.33:6379> exit We could easily use this or other environment variables in our web application to make a connection to our `redis` diff --git a/docs/sources/examples/running_riak_service.md b/docs/sources/examples/running_riak_service.md index 61594f9cd8..852035f9a4 100644 --- a/docs/sources/examples/running_riak_service.md +++ b/docs/sources/examples/running_riak_service.md @@ -19,7 +19,7 @@ Riak pre-installed. Create an empty file called Dockerfile: - touch Dockerfile + $ touch Dockerfile Next, define the parent image you want to use to build your image on top of. We'll use [Ubuntu](https://index.docker.io/_/ubuntu/) (tag: @@ -126,7 +126,7 @@ Populate it with the following program definitions: Now you should be able to build a Docker image for Riak: - docker build -t "/riak" . + $ docker build -t "/riak" . ## Next steps diff --git a/docs/sources/examples/using_supervisord.md b/docs/sources/examples/using_supervisord.md index 8e85ae05d2..29d2fa4525 100644 --- a/docs/sources/examples/using_supervisord.md +++ b/docs/sources/examples/using_supervisord.md @@ -99,13 +99,13 @@ launches. We can now build our new container. - sudo docker build -t /supervisord . + $ sudo docker build -t /supervisord . ## Running our Supervisor container Once We've got a built image we can launch a container from it. - sudo docker run -p 22 -p 80 -t -i /supervisord + $ sudo docker run -p 22 -p 80 -t -i /supervisord 2013-11-25 18:53:22,312 CRIT Supervisor running as root (no user in config file) 2013-11-25 18:53:22,312 WARN Included extra file "/etc/supervisor/conf.d/supervisord.conf" during parsing 2013-11-25 18:53:22,342 INFO supervisord started with pid 1 diff --git a/docs/sources/installation/archlinux.md b/docs/sources/installation/archlinux.md index 6e970b96f6..c6d4f73fb8 100644 --- a/docs/sources/installation/archlinux.md +++ b/docs/sources/installation/archlinux.md @@ -60,8 +60,8 @@ have not done so before. There is a systemd service unit created for docker. 
To start the docker service: - sudo systemctl start docker + $ sudo systemctl start docker To start on system boot: - sudo systemctl enable docker + $ sudo systemctl enable docker diff --git a/docs/sources/installation/binaries.md b/docs/sources/installation/binaries.md index b02c28828b..36aa0ae249 100644 --- a/docs/sources/installation/binaries.md +++ b/docs/sources/installation/binaries.md @@ -46,8 +46,8 @@ Linux kernel (it even builds on OSX!). ## Get the docker binary: - wget https://get.docker.io/builds/Linux/x86_64/docker-latest -O docker - chmod +x docker + $ wget https://get.docker.io/builds/Linux/x86_64/docker-latest -O docker + $ chmod +x docker > **Note**: > If you have trouble downloading the binary, you can also get the smaller @@ -58,7 +58,7 @@ Linux kernel (it even builds on OSX!). ## Run the docker daemon # start the docker in daemon mode from the directory you unpacked - sudo ./docker -d & + $ sudo ./docker -d & ## Giving non-root access @@ -87,16 +87,16 @@ all the client commands. To upgrade your manual installation of Docker, first kill the docker daemon: - killall docker + $ killall docker Then follow the regular installation steps. ## Run your first container! # check your docker version - sudo ./docker version + $ sudo ./docker version # run a container and open an interactive shell in the container - sudo ./docker run -i -t ubuntu /bin/bash + $ sudo ./docker run -i -t ubuntu /bin/bash Continue with the [*Hello World*](/examples/hello_world/#hello-world) example. diff --git a/docs/sources/installation/cruxlinux.md b/docs/sources/installation/cruxlinux.md index f37d720389..d1a4de7367 100644 --- a/docs/sources/installation/cruxlinux.md +++ b/docs/sources/installation/cruxlinux.md @@ -39,24 +39,24 @@ do so via: Download the `httpup` file to `/etc/ports/`: - curl -q -o - http://crux.nu/portdb/?a=getup&q=prologic > /etc/ports/prologic.httpup + $ curl -q -o - http://crux.nu/portdb/?a=getup&q=prologic > /etc/ports/prologic.httpup Add `prtdir /usr/ports/prologic` to `/etc/prt-get.conf`: - vim /etc/prt-get.conf + $ vim /etc/prt-get.conf # or: - echo "prtdir /usr/ports/prologic" >> /etc/prt-get.conf + $ echo "prtdir /usr/ports/prologic" >> /etc/prt-get.conf Update ports and prt-get cache: - ports -u - prt-get cache + $ ports -u + $ prt-get cache To install (*and its dependencies*): - prt-get depinst docker + $ prt-get depinst docker Use `docker-bin` for the upstream binary or `docker-git` to build and install from the master @@ -70,20 +70,20 @@ and Docker Daemon to work properly. Please read the `README.rst`: - prt-get readme docker + $ prt-get readme docker There is a `test_kernel_config.sh` script in the above ports which you can use to test your Kernel configuration: - cd /usr/ports/prologic/docker - ./test_kernel_config.sh /usr/src/linux/.config + $ cd /usr/ports/prologic/docker + $ ./test_kernel_config.sh /usr/src/linux/.config ## Starting Docker There is a rc script created for Docker. To start the Docker service: - sudo su - - /etc/rc.d/docker start + $ sudo su - + $ /etc/rc.d/docker start To start on system boot: diff --git a/docs/sources/installation/fedora.md b/docs/sources/installation/fedora.md index 70d8c1462e..93b5b05b13 100644 --- a/docs/sources/installation/fedora.md +++ b/docs/sources/installation/fedora.md @@ -30,35 +30,35 @@ report](https://bugzilla.redhat.com/show_bug.cgi?id=1043676) filed for it. To proceed with `docker-io` installation on Fedora 19, please remove `docker` first. 
- sudo yum -y remove docker + $ sudo yum -y remove docker For Fedora 20 and later, the `wmdocker` package will provide the same functionality as `docker` and will also not conflict with `docker-io`. - sudo yum -y install wmdocker - sudo yum -y remove docker + $ sudo yum -y install wmdocker + $ sudo yum -y remove docker Install the `docker-io` package which will install Docker on our host. - sudo yum -y install docker-io + $ sudo yum -y install docker-io To update the `docker-io` package: - sudo yum -y update docker-io + $ sudo yum -y update docker-io Now that it's installed, let's start the Docker daemon. - sudo systemctl start docker + $ sudo systemctl start docker If we want Docker to start at boot, we should also: - sudo systemctl enable docker + $ sudo systemctl enable docker Now let's verify that Docker is working. - sudo docker run -i -t fedora /bin/bash + $ sudo docker run -i -t fedora /bin/bash **Done!**, now continue with the [*Hello World*](/examples/hello_world/#hello-world) example. diff --git a/docs/sources/installation/frugalware.md b/docs/sources/installation/frugalware.md index 1d640cf8fd..eb409d8d39 100644 --- a/docs/sources/installation/frugalware.md +++ b/docs/sources/installation/frugalware.md @@ -49,8 +49,8 @@ is all that is needed. There is a systemd service unit created for Docker. To start Docker as service: - sudo systemctl start lxc-docker + $ sudo systemctl start lxc-docker To start on system boot: - sudo systemctl enable lxc-docker + $ sudo systemctl enable lxc-docker diff --git a/docs/sources/installation/gentoolinux.md b/docs/sources/installation/gentoolinux.md index 49700ea563..92329dca90 100644 --- a/docs/sources/installation/gentoolinux.md +++ b/docs/sources/installation/gentoolinux.md @@ -43,7 +43,7 @@ use flags to pull in the proper dependencies of the major storage drivers, with the "device-mapper" use flag being enabled by default, since that is the simplest installation path. - sudo emerge -av app-emulation/docker + $ sudo emerge -av app-emulation/docker If any issues arise from this ebuild or the resulting binary, including and especially missing kernel configuration flags and/or dependencies, @@ -61,18 +61,18 @@ and/or AUFS, depending on the storage driver you`ve decided to use). To start the docker daemon: - sudo /etc/init.d/docker start + $ sudo /etc/init.d/docker start To start on system boot: - sudo rc-update add docker default + $ sudo rc-update add docker default ### systemd To start the docker daemon: - sudo systemctl start docker.service + $ sudo systemctl start docker.service To start on system boot: - sudo systemctl enable docker.service + $ sudo systemctl enable docker.service diff --git a/docs/sources/installation/google.md b/docs/sources/installation/google.md index bec7d0ba13..4c22808dcb 100644 --- a/docs/sources/installation/google.md +++ b/docs/sources/installation/google.md @@ -45,19 +45,19 @@ page_keywords: Docker, Docker documentation, installation, google, Google Comput $ gcutil ssh docker-playground - docker-playground:~$ + $ docker-playground:~$ 5. Install the latest Docker release and configure it to start when the instance boots: - docker-playground:~$ curl get.docker.io | bash - docker-playground:~$ sudo update-rc.d docker defaults + $ docker-playground:~$ curl get.docker.io | bash + $ docker-playground:~$ sudo update-rc.d docker defaults 6. 
Start a new container: - docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/' - docker on GCE \o/ + $ docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/' + $ docker on GCE \o/ diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md index 1cef06b55b..15736f5c6c 100644 --- a/docs/sources/installation/mac.md +++ b/docs/sources/installation/mac.md @@ -40,7 +40,7 @@ image that is used for the job. If you are using Homebrew on your machine, simply run the following command to install `boot2docker`: - brew install boot2docker + $ brew install boot2docker #### Manual installation @@ -49,13 +49,13 @@ Open up a new terminal window, if you have not already. Run the following commands to get boot2docker: # Enter the installation directory - cd ~/bin + $ cd ~/bin # Get the file - curl https://raw.github.com/boot2docker/boot2docker/master/boot2docker > boot2docker + $ curl https://raw.github.com/boot2docker/boot2docker/master/boot2docker > boot2docker # Mark it executable - chmod +x boot2docker + $ chmod +x boot2docker ### Docker OS X Client @@ -67,25 +67,25 @@ The `docker` daemon is accessed using the Run the following command to install the `docker` client: - brew install docker + $ brew install docker #### Manual installation Run the following commands to get it downloaded and set up: # Get the docker client file - DIR=$(mktemp -d ${TMPDIR:-/tmp}/dockerdl.XXXXXXX) && \ - curl -f -o $DIR/ld.tgz https://get.docker.io/builds/Darwin/x86_64/docker-latest.tgz && \ - gunzip $DIR/ld.tgz && \ - tar xvf $DIR/ld.tar -C $DIR/ && \ - cp $DIR/usr/local/bin/docker ./docker + $ DIR=$(mktemp -d ${TMPDIR:-/tmp}/dockerdl.XXXXXXX) && \ + $ curl -f -o $DIR/ld.tgz https://get.docker.io/builds/Darwin/x86_64/docker-latest.tgz && \ + $ gunzip $DIR/ld.tgz && \ + $ tar xvf $DIR/ld.tar -C $DIR/ && \ + $ cp $DIR/usr/local/bin/docker ./docker # Set the environment variable for the docker daemon - export DOCKER_HOST=tcp://127.0.0.1:4243 + $ export DOCKER_HOST=tcp://127.0.0.1:4243 # Copy the executable file - sudo mkdir -p /usr/local/bin - sudo cp docker /usr/local/bin/ + $ sudo mkdir -p /usr/local/bin + $ sudo cp docker /usr/local/bin/ And that's it! Let's check out how to use it. @@ -97,13 +97,13 @@ Inside the `~/bin` directory, run the following commands: # Initiate the VM - ./boot2docker init + $ ./boot2docker init # Run the VM (the docker daemon) - ./boot2docker up + $ ./boot2docker up # To see all available commands: - ./boot2docker + $ ./boot2docker # Usage ./boot2docker {init|start|up|pause|stop|restart|status|info|delete|ssh|download} @@ -113,7 +113,7 @@ Once the VM with the `docker` daemon is up, you can use the `docker` client just like any other application. - docker version + $ docker version # Client version: 0.7.6 # Go version (client): go1.2 # Git commit (client): bc3b2ec @@ -137,7 +137,7 @@ interact with our containers as if they were running locally: If you feel the need to connect to the VM, you can simply run: - ./boot2docker ssh + $ ./boot2docker ssh # User: docker # Pwd: tcuser @@ -154,7 +154,7 @@ See the GitHub page for ### If SSH complains about keys: - ssh-keygen -R '[localhost]:2022' + $ ssh-keygen -R '[localhost]:2022' ### Upgrading to a newer release of boot2docker @@ -162,9 +162,9 @@ To upgrade an initialised VM, you can use the following 3 commands. 
Your persistence disk will not be changed, so you won't lose your images and containers: - ./boot2docker stop - ./boot2docker download - ./boot2docker start + $ ./boot2docker stop + $ ./boot2docker download + $ ./boot2docker start ### About the way Docker works on Mac OS X: diff --git a/docs/sources/installation/openSUSE.md b/docs/sources/installation/openSUSE.md index 2d7804d291..07f2ca43d2 100644 --- a/docs/sources/installation/openSUSE.md +++ b/docs/sources/installation/openSUSE.md @@ -30,14 +30,14 @@ To proceed with Docker installation please add the right Virtualization repository. # openSUSE 12.3 - sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_12.3/ Virtualization + $ sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_12.3/ Virtualization # openSUSE 13.1 - sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_13.1/ Virtualization + $ sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_13.1/ Virtualization Install the Docker package. - sudo zypper in docker + $ sudo zypper in docker It's also possible to install Docker using openSUSE's1-click install. Just visit [this](http://software.opensuse.org/package/docker) page, @@ -47,17 +47,17 @@ the docker package. Now that it's installed, let's start the Docker daemon. - sudo systemctl start docker + $ sudo systemctl start docker If we want Docker to start at boot, we should also: - sudo systemctl enable docker + $ sudo systemctl enable docker The docker package creates a new group named docker. Users, other than root user, need to be part of this group in order to interact with the Docker daemon. - sudo usermod -G docker + $ sudo usermod -G docker **Done!** Now continue with the [*Hello World*]( diff --git a/docs/sources/installation/rackspace.md b/docs/sources/installation/rackspace.md index 8cce292b79..c93af388ed 100644 --- a/docs/sources/installation/rackspace.md +++ b/docs/sources/installation/rackspace.md @@ -29,16 +29,16 @@ you will need to set the kernel manually. **Do not attempt this on a production machine!** # update apt - apt-get update + $ apt-get update # install the new kernel - apt-get install linux-generic-lts-raring + $ apt-get install linux-generic-lts-raring Great, now you have the kernel installed in `/boot/`, next you need to make it boot next time. # find the exact names - find /boot/ -name '*3.8*' + $ find /boot/ -name '*3.8*' # this should return some results @@ -51,7 +51,7 @@ the right files. Take special care to double check the kernel and initrd entries. # now edit /boot/grub/menu.lst - vi /boot/grub/menu.lst + $ vi /boot/grub/menu.lst It will probably look something like this: @@ -78,7 +78,7 @@ Reboot the server (either via command line or console) Verify the kernel was updated - uname -a + $ uname -a # Linux docker-12-04 3.8.0-19-generic #30~precise1-Ubuntu SMP Wed May 1 22:26:36 UTC 2013 x86_64 x86_64 x86_64 GNU/Linux # nice! 3.8. diff --git a/docs/sources/installation/rhel.md b/docs/sources/installation/rhel.md index 874e92adc8..632743a2b9 100644 --- a/docs/sources/installation/rhel.md +++ b/docs/sources/installation/rhel.md @@ -49,23 +49,23 @@ To proceed with `docker-io` installation, please remove `docker` first. Next, let's install the `docker-io` package which will install Docker on our host. 
- sudo yum -y install docker-io + $ sudo yum -y install docker-io To update the `docker-io` package - sudo yum -y update docker-io + $ sudo yum -y update docker-io Now that it's installed, let's start the Docker daemon. - sudo service docker start + $ sudo service docker start If we want Docker to start at boot, we should also: - sudo chkconfig docker on + $ sudo chkconfig docker on Now let's verify that Docker is working. - sudo docker run -i -t fedora /bin/bash + $ sudo docker run -i -t fedora /bin/bash **Done!** Now continue with the [*Hello World*](/examples/hello_world/#hello-world) example. diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md index 04173cf917..d40e17b646 100644 --- a/docs/sources/installation/ubuntulinux.md +++ b/docs/sources/installation/ubuntulinux.md @@ -33,13 +33,13 @@ installs all its prerequisites from Ubuntu's repository. To install the latest Ubuntu package (may not be the latest Docker release): - sudo apt-get update - sudo apt-get install docker.io - sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker + $ sudo apt-get update + $ sudo apt-get install docker.io + $ sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker To verify that everything has worked as expected: - sudo docker run -i -t ubuntu /bin/bash + $ sudo docker run -i -t ubuntu /bin/bash Which should download the `ubuntu` image, and then start `bash` in a container. @@ -61,11 +61,11 @@ VirtualBox guest additions. If you didn't install the headers for your kernel. But it is safer to include them if you're not sure. # install the backported kernel - sudo apt-get update - sudo apt-get install linux-image-generic-lts-raring linux-headers-generic-lts-raring + $ sudo apt-get update + $ sudo apt-get install linux-image-generic-lts-raring linux-headers-generic-lts-raring # reboot - sudo reboot + $ sudo reboot ### Installation @@ -90,7 +90,7 @@ should exist. If it doesn't, you need to install the package Then, add the Docker repository key to your local keychain. - sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 + $ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 Add the Docker repository to your apt sources list, update and install the `lxc-docker` package. @@ -98,21 +98,21 @@ the `lxc-docker` package. *You may receive a warning that the package isn't trusted. Answer yes to continue installation.* - sudo sh -c "echo deb https://get.docker.io/ubuntu docker main\ + $ sudo sh -c "echo deb https://get.docker.io/ubuntu docker main\ > /etc/apt/sources.list.d/docker.list" - sudo apt-get update - sudo apt-get install lxc-docker + $ sudo apt-get update + $ sudo apt-get install lxc-docker > **Note**: > > There is also a simple `curl` script available to help with this process. > -> curl -s https://get.docker.io/ubuntu/ | sudo sh +> $ curl -s https://get.docker.io/ubuntu/ | sudo sh Now verify that the installation has worked by downloading the `ubuntu` image and launching a container. - sudo docker run -i -t ubuntu /bin/bash + $ sudo docker run -i -t ubuntu /bin/bash Type `exit` to exit @@ -134,8 +134,8 @@ available as a driver and we recommend using it if you can. 
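If you are not sure whether AUFS support is already present, an optional quick check (illustrative only; the package installation below is the supported route) is to look for it in the kernel's filesystem list. When AUFS is available, the output typically contains a line like the one shown:

    $ grep aufs /proc/filesystems
    nodev   aufs

An empty result only means the module is not currently loaded, so proceed with the installation steps that follow.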
To make sure AUFS is installed, run the following commands: - sudo apt-get update - sudo apt-get install linux-image-extra-`uname -r` + $ sudo apt-get update + $ sudo apt-get install linux-image-extra-`uname -r` ### Installation @@ -147,20 +147,20 @@ Docker is available as a Debian package, which makes installation easy. First add the Docker repository key to your local keychain. - sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 + $ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 Add the Docker repository to your apt sources list, update and install the `lxc-docker` package. - sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\ + $ sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\ > /etc/apt/sources.list.d/docker.list" - sudo apt-get update - sudo apt-get install lxc-docker + $ sudo apt-get update + $ sudo apt-get install lxc-docker Now verify that the installation has worked by downloading the `ubuntu` image and launching a container. - sudo docker run -i -t ubuntu /bin/bash + $ sudo docker run -i -t ubuntu /bin/bash Type `exit` to exit @@ -194,16 +194,16 @@ than `docker` should own the Unix socket with the **Example:** # Add the docker group if it doesn't already exist. - sudo groupadd docker + $ sudo groupadd docker # Add the connected user "${USER}" to the docker group. # Change the user name to match your preferred user. # You may have to logout and log back in again for # this to take effect. - sudo gpasswd -a ${USER} docker + $ sudo gpasswd -a ${USER} docker # Restart the Docker daemon. - sudo service docker restart + $ sudo service docker restart ### Upgrade @@ -211,28 +211,28 @@ To install the latest version of docker, use the standard `apt-get` method: # update your sources list - sudo apt-get update + $ sudo apt-get update # install the latest - sudo apt-get install lxc-docker + $ sudo apt-get install lxc-docker ## Memory and Swap Accounting If you want to enable memory and swap accounting, you must add the following command-line parameters to your kernel: - cgroup_enable=memory swapaccount=1 + $ cgroup_enable=memory swapaccount=1 On systems using GRUB (which is the default for Ubuntu), you can add those parameters by editing `/etc/default/grub` and extending `GRUB_CMDLINE_LINUX`. Look for the following line: - GRUB_CMDLINE_LINUX="" + $ GRUB_CMDLINE_LINUX="" And replace it by the following one: - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" + $ GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" Then run `sudo update-grub`, and reboot. @@ -247,7 +247,7 @@ On Linux Mint, the `cgroup-lite` package is not installed by default. Before Docker will work correctly, you will need to install this via: - sudo apt-get update && sudo apt-get install cgroup-lite + $ sudo apt-get update && sudo apt-get install cgroup-lite ## Docker and UFW @@ -255,22 +255,22 @@ Docker uses a bridge to manage container networking. By default, UFW drops all forwarding traffic. As a result you will need to enable UFW forwarding: - sudo nano /etc/default/ufw - ---- + $ sudo nano /etc/default/ufw + # Change: # DEFAULT_FORWARD_POLICY="DROP" # to - DEFAULT_FORWARD_POLICY="ACCEPT" + $ DEFAULT_FORWARD_POLICY="ACCEPT" Then reload UFW: - sudo ufw reload + $ sudo ufw reload UFW's default set of rules denies all incoming traffic. 
If you want to be able to reach your containers from another host then you should allow incoming connections on the Docker port (default 4243): - sudo ufw allow 4243/tcp + $ sudo ufw allow 4243/tcp ## Docker and local DNS server warnings @@ -290,16 +290,16 @@ nameserver and Docker will default to using an external nameserver. This can be worked around by specifying a DNS server to be used by the Docker daemon for the containers: - sudo nano /etc/default/docker + $ sudo nano /etc/default/docker --- # Add: - DOCKER_OPTS="--dns 8.8.8.8" + $ docker_OPTS="--dns 8.8.8.8" # 8.8.8.8 could be replaced with a local DNS server, such as 192.168.1.1 # multiple DNS servers can be specified: --dns 8.8.8.8 --dns 192.168.1.1 The Docker daemon has to be restarted: - sudo restart docker + $ sudo restart docker > **Warning**: > If you're doing this on a laptop which connects to various networks, @@ -308,7 +308,7 @@ The Docker daemon has to be restarted: An alternative solution involves disabling dnsmasq in NetworkManager by following these steps: - sudo nano /etc/NetworkManager/NetworkManager.conf + $ sudo nano /etc/NetworkManager/NetworkManager.conf ---- # Change: dns=dnsmasq @@ -317,8 +317,8 @@ following these steps: NetworkManager and Docker need to be restarted afterwards: - sudo restart network-manager - sudo restart docker + $ sudo restart network-manager + $ sudo restart docker > **Warning**: This might make DNS resolution slower on some networks. @@ -336,7 +336,7 @@ Substitute `http://mirror.yandex.ru/mirrors/docker/` for `http://get.docker.io/ubuntu` in the instructions above. For example: - sudo sh -c "echo deb http://mirror.yandex.ru/mirrors/docker/ docker main\ + $ sudo sh -c "echo deb http://mirror.yandex.ru/mirrors/docker/ docker main\ > /etc/apt/sources.list.d/docker.list" - sudo apt-get update - sudo apt-get install lxc-docker + $ sudo apt-get update + $ sudo apt-get install lxc-docker diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md index a5730862ad..ec633508c4 100644 --- a/docs/sources/installation/windows.md +++ b/docs/sources/installation/windows.md @@ -55,7 +55,7 @@ right away. Let's try the “hello world” example. Run - docker run busybox echo hello world + $ docker run busybox echo hello world This will download the small busybox image and print hello world. diff --git a/docs/sources/introduction/working-with-docker.md b/docs/sources/introduction/working-with-docker.md index e76c80cffa..d6bdb2260d 100644 --- a/docs/sources/introduction/working-with-docker.md +++ b/docs/sources/introduction/working-with-docker.md @@ -60,7 +60,7 @@ The `docker` client usage consists of passing a chain of arguments: # Usage: [sudo] docker [option] [command] [arguments] .. # Example: - docker run -i -t ubuntu /bin/bash + $ docker run -i -t ubuntu /bin/bash ### Our first Docker command @@ -70,7 +70,7 @@ version` command. # Usage: [sudo] docker version # Example: - docker version + $ docker version This command will not only provide you the version of Docker client you are using, but also the version of Go (the programming language powering @@ -97,7 +97,7 @@ binary: # Usage: [sudo] docker # Example: - docker + $ docker You will get an output with all currently available commands. @@ -116,12 +116,12 @@ Try typing Docker followed with a `[command]` to see the instructions: # Usage: [sudo] docker [command] [--help] # Example: - docker attach + $ docker attach Help outputs . . . Or you can pass the `--help` flag to the `docker` binary. 
- docker images --help + $ docker images --help You will get an output with all available options: @@ -156,12 +156,12 @@ image is constructed. # Usage: [sudo] docker search [image name] # Example: - docker search nginx + $ docker search nginx NAME DESCRIPTION STARS OFFICIAL TRUSTED - dockerfile/nginx Trusted Nginx (http://nginx.org/) Build 6 [OK] + $ dockerfile/nginx Trusted Nginx (http://nginx.org/) Build 6 [OK] paintedfox/nginx-php5 A docker image for running Nginx with PHP5. 3 [OK] - dockerfiles/django-uwsgi-nginx Dockerfile and configuration files to buil... 2 [OK] + $ dockerfiles/django-uwsgi-nginx dockerfile and configuration files to buil... 2 [OK] . . . > **Note:** To learn more about trusted builds, check out [this]( @@ -174,7 +174,7 @@ Downloading a Docker image is called *pulling*. To do this we hence use the # Usage: [sudo] docker pull [image name] # Example: - docker pull dockerfile/nginx + $ docker pull dockerfile/nginx Pulling repository dockerfile/nginx 0ade68db1d05: Pulling dependent layers @@ -193,12 +193,12 @@ In order to get a full list of available images, you can use the # Usage: [sudo] docker images # Example: - docker images + $ docker images REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE myUserName/nginx latest a0d6c70867d2 41 seconds ago 578.8 MB nginx latest 173c2dd28ab2 3 minutes ago 578.8 MB - dockerfile/nginx latest 0ade68db1d05 3 weeks ago 578.8 MB + $ dockerfile/nginx latest 0ade68db1d05 3 weeks ago 578.8 MB ## Working with containers @@ -215,7 +215,7 @@ The easiest way to create a new container is to *run* one from an image. # Usage: [sudo] docker run [arguments] .. # Example: - docker run -d --name nginx_web nginx /usr/sbin/nginx + $ docker run -d --name nginx_web nginx /usr/sbin/nginx This will create a new container from an image called `nginx` which will launch the command `/usr/sbin/nginx` when the container is run. We've @@ -242,10 +242,10 @@ both running and stopped. # Usage: [sudo] docker ps [-a] # Example: - docker ps + $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 842a50a13032 dockerfile/nginx:latest nginx 35 minutes ago Up 30 minutes 0.0.0.0:80->80/tcp nginx_web + 842a50a13032 $ dockerfile/nginx:latest nginx 35 minutes ago Up 30 minutes 0.0.0.0:80->80/tcp nginx_web ### Stopping a container @@ -254,7 +254,7 @@ end the active process. # Usage: [sudo] docker stop [container ID] # Example: - docker stop nginx_web + $ docker stop nginx_web nginx_web If the `docker stop` command succeeds it will return the name of @@ -266,7 +266,7 @@ Stopped containers can be started again. # Usage: [sudo] docker start [container ID] # Example: - docker start nginx_web + $ docker start nginx_web nginx_web If the `docker start` command succeeds it will return the name of the @@ -358,7 +358,7 @@ Docker uses the `Dockerfile` to build images. The build process is initiated by # Use the Dockerfile at the current location # Usage: [sudo] docker build . # Example: - docker build -t="my_nginx_image" . + $ docker build -t="my_nginx_image" . Uploading context 25.09 kB Uploading context @@ -385,7 +385,7 @@ image, here `my_nginx_image`. We can see our new image using the `docker images` command. 
- docker images + $ docker images REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE my_nginx_img latest 626e92c5fab1 57 seconds ago 337.6 MB diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.4.md b/docs/sources/reference/api/archive/docker_remote_api_v1.4.md index f31f87e55f..2e7e94f7d4 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.4.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.4.md @@ -1127,4 +1127,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.5.md b/docs/sources/reference/api/archive/docker_remote_api_v1.5.md index 1d0b7e203f..08457bfd94 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.5.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.5.md @@ -1134,4 +1134,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.6.md b/docs/sources/reference/api/archive/docker_remote_api_v1.6.md index ebf2843e93..bca09a3a0e 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.6.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.6.md @@ -1236,4 +1236,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.7.md b/docs/sources/reference/api/archive/docker_remote_api_v1.7.md index 0f18e09d0a..818fbba11c 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.7.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.7.md @@ -1230,4 +1230,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.8.md b/docs/sources/reference/api/archive/docker_remote_api_v1.8.md index 53a8e4d7e1..0d2997693c 100644 --- a/docs/sources/reference/api/archive/docker_remote_api_v1.8.md +++ b/docs/sources/reference/api/archive/docker_remote_api_v1.8.md @@ -1276,4 +1276,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. 
- docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md index bbf3592cfc..721244b49e 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.md +++ b/docs/sources/reference/api/docker_remote_api_v1.10.md @@ -1297,4 +1297,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index 40dee1af63..59e07a46b8 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -1301,4 +1301,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md index bc62b20ac4..d8be62a7a7 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.md +++ b/docs/sources/reference/api/docker_remote_api_v1.9.md @@ -1313,4 +1313,4 @@ stdout and stderr on the same socket. This might change in the future. To enable cross origin requests to the remote api add the flag "–api-enable-cors" when running docker in daemon mode. - docker -d -H="192.168.1.9:4243" --api-enable-cors + $ docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/registry_index_spec.md b/docs/sources/reference/api/registry_index_spec.md index fb5617d101..93ba469221 100644 --- a/docs/sources/reference/api/registry_index_spec.md +++ b/docs/sources/reference/api/registry_index_spec.md @@ -111,7 +111,7 @@ supports: It's possible to run: - docker pull https:///repositories/samalba/busybox + $ docker pull https:///repositories/samalba/busybox In this case, Docker bypasses the Index. However the security is not guaranteed (in case Registry A is corrupted) because there won't be any diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index 3e278425c2..98e9e0f544 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -18,7 +18,7 @@ This file will describe the steps to assemble the image. Then call `docker build` with the path of you source repository as argument (for example, `.`): - sudo docker build . + $ sudo docker build . The path to the source repository defines where to find the *context* of the build. The build is run by the Docker daemon, not by the CLI, so the @@ -28,7 +28,7 @@ whole context must be transferred to the daemon. The Docker CLI reports You can specify a repository and tag at which to save the new image if the build succeeds: - sudo docker build -t shykes/myapp . + $ sudo docker build -t shykes/myapp . 
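The `-t` flag also accepts an explicit `repository:tag` form if you want something other than the default `latest` tag; the repository and tag below are placeholder values for illustration:

    $ sudo docker build -t shykes/myapp:1.0 .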
The Docker daemon will run your steps one-by-one, committing the result to a new image if necessary, before finally outputting the ID of your diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 75a5be33b6..ef5c3bc1f7 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -35,11 +35,11 @@ will set the value to the opposite of the default value. Options like `-a=[]` indicate they can be specified multiple times: - docker run -a stdin -a stdout -a stderr -i -t ubuntu /bin/bash + $ docker run -a stdin -a stdout -a stderr -i -t ubuntu /bin/bash Sometimes this can use a more complex value string, as for `-v`: - docker run -v /host:/container example/mysql + $ docker run -v /host:/container example/mysql ### Strings and Integers @@ -100,10 +100,10 @@ To use lxc as the execution driver, use `docker -d -e lxc`. The docker client will also honor the `DOCKER_HOST` environment variable to set the `-H` flag for the client. - docker -H tcp://0.0.0.0:4243 ps + $ docker -H tcp://0.0.0.0:4243 ps # or - export DOCKER_HOST="tcp://0.0.0.0:4243" - docker ps + $ export DOCKER_HOST="tcp://0.0.0.0:4243" + $ docker ps # both are equal To run the daemon with [systemd socket activation]( @@ -448,7 +448,7 @@ by default. 77af4d6b9913 19 hours ago 1.089 GB committest latest b6fa739cedf5 19 hours ago 1.089 GB 78a85c484f71 19 hours ago 1.089 GB - docker latest 30557a29d5ab 20 hours ago 1.089 GB + $ docker latest 30557a29d5ab 20 hours ago 1.089 GB 0124422dd9f9 20 hours ago 1.089 GB 18ad6fad3402 22 hours ago 1.082 GB f9f1e26352f0 23 hours ago 1.089 GB @@ -462,7 +462,7 @@ by default. 77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB committest latest b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB 78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB - docker latest 30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB + $ docker latest 30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB 0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB 18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB @@ -640,7 +640,7 @@ If you want to login to a private registry you can specify this by adding the server name. example: - docker login localhost:8080 + $ docker login localhost:8080 ## logs diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 9de08ec1a6..a8acb97071 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -22,7 +22,7 @@ running containers, and so here we try to give more in-depth guidance. As you`ve seen in the [*Examples*](/examples/#example-list), the basic run command takes this form: - docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] + $ docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] To learn how to interpret the types of `[OPTIONS]`, see [*Option types*](/commandline/cli/#cli-options). 
@@ -99,7 +99,7 @@ https://github.com/dotcloud/docker/blob/ of the three standard streams (`stdin`, `stdout`, `stderr`) you'd like to connect instead, as in: - docker run -a stdin -a stdout -i -t ubuntu /bin/bash + $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash For interactive processes (like a shell) you will typically want a tty as well as persistent standard input (`stdin`), so you'll use `-i -t` together in most @@ -233,7 +233,7 @@ Dockerfile instruction and how the operator can override that setting. Recall the optional `COMMAND` in the Docker commandline: - docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] + $ docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] This command is optional because the person who created the `IMAGE` may have already provided a default `COMMAND` using the Dockerfile `CMD`. As the @@ -259,12 +259,12 @@ runtime by using a string to specify the new `ENTRYPOINT`. Here is an example of how to run a shell in a container that has been set up to automatically run something else (like `/usr/bin/redis-server`): - docker run -i -t --entrypoint /bin/bash example/redis + $ docker run -i -t --entrypoint /bin/bash example/redis or two examples of how to pass more parameters to that ENTRYPOINT: - docker run -i -t --entrypoint /bin/bash example/redis -c ls -l - docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help + $ docker run -i -t --entrypoint /bin/bash example/redis -c ls -l + $ docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help ## EXPOSE (Incoming Ports) @@ -335,7 +335,7 @@ container running Redis: # The redis-name container exposed port 6379 $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 4241164edf6f dockerfiles/redis:latest /redis-stable/src/re 5 seconds ago Up 4 seconds 6379/tcp redis-name + 4241164edf6f $ dockerfiles/redis:latest /redis-stable/src/re 5 seconds ago Up 4 seconds 6379/tcp redis-name # Note that there are no public ports exposed since we didn᾿t use -p or -P $ docker port 4241164edf6f 6379 diff --git a/docs/sources/use/ambassador_pattern_linking.md b/docs/sources/use/ambassador_pattern_linking.md index a04dbdffc0..2bdd434f6e 100644 --- a/docs/sources/use/ambassador_pattern_linking.md +++ b/docs/sources/use/ambassador_pattern_linking.md @@ -146,7 +146,7 @@ remote IP and port - in this case `192.168.1.52:6379`. # then to run it (on the host that has the real backend on it) # docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 ambassador # on the remote host, you can set up another ambassador - # docker run -t -i -name redis_ambassador -expose 6379 sh + # docker run -t -i -name redis_ambassador -expose 6379 sh FROM docker-ut MAINTAINER SvenDowideit@home.org.au diff --git a/docs/sources/use/basics.md b/docs/sources/use/basics.md index b9d52877e4..ee3eeabd9d 100644 --- a/docs/sources/use/basics.md +++ b/docs/sources/use/basics.md @@ -10,7 +10,7 @@ This guide assumes you have a working installation of Docker. To check your Docker install, run the following command: # Check that you have a working install - docker info + $ docker info If you get `docker: command not found` or something like `/var/lib/docker/repositories: permission denied` @@ -23,7 +23,7 @@ for installation instructions. ## Download a pre-built image # Download an ubuntu image - sudo docker pull ubuntu + $ sudo docker pull ubuntu This will find the `ubuntu` image by name on [*Docker.io*](../workingwithrepository/#find-public-images-on-dockerio) and @@ -46,7 +46,7 @@ cache. 
# To detach the tty without exiting the shell, # use the escape sequence Ctrl-p + Ctrl-q # note: This will continue to exist in a stopped state once exited (see "docker ps -a") - sudo docker run -i -t ubuntu /bin/bash + $ sudo docker run -i -t ubuntu /bin/bash ## Bind Docker to another host/port or a Unix socket @@ -87,70 +87,70 @@ when no `-H` was passed in. `host[:port]` or `:port` # Run docker in daemon mode - sudo /docker -H 0.0.0.0:5555 -d & + $ sudo /docker -H 0.0.0.0:5555 -d & # Download an ubuntu image - sudo docker -H :5555 pull ubuntu + $ sudo docker -H :5555 pull ubuntu You can use multiple `-H`, for example, if you want to listen on both TCP and a Unix socket # Run docker in daemon mode - sudo /docker -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -d & + $ sudo /docker -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -d & # Download an ubuntu image, use default Unix socket - sudo docker pull ubuntu + $ sudo docker pull ubuntu # OR use the TCP port - sudo docker -H tcp://127.0.0.1:4243 pull ubuntu + $ sudo docker -H tcp://127.0.0.1:4243 pull ubuntu ## Starting a long-running worker process # Start a very useful long-running process - JOB=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done") + $ JOB=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done") # Collect the output of the job so far - sudo docker logs $JOB + $ sudo docker logs $JOB # Kill the job - sudo docker kill $JOB + $ sudo docker kill $JOB ## Listing containers - sudo docker ps # Lists only running containers - sudo docker ps -a # Lists all containers + $ sudo docker ps # Lists only running containers + $ sudo docker ps -a # Lists all containers ## Controlling containers # Start a new container - JOB=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done") + $ JOB=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done") # Stop the container - docker stop $JOB + $ docker stop $JOB # Start the container - docker start $JOB + $ docker start $JOB # Restart the container - docker restart $JOB + $ docker restart $JOB # SIGKILL a container - docker kill $JOB + $ docker kill $JOB # Remove a container - docker stop $JOB # Container must be stopped to remove it - docker rm $JOB + $ docker stop $JOB # Container must be stopped to remove it + $ docker rm $JOB ## Bind a service on a TCP port # Bind port 4444 of this container, and tell netcat to listen on it - JOB=$(sudo docker run -d -p 4444 ubuntu:12.10 /bin/nc -l 4444) + $ JOB=$(sudo docker run -d -p 4444 ubuntu:12.10 /bin/nc -l 4444) # Which public port is NATed to my container? - PORT=$(sudo docker port $JOB 4444 | awk -F: '{ print $2 }') + $ PORT=$(sudo docker port $JOB 4444 | awk -F: '{ print $2 }') # Connect to the public port - echo hello world | nc 127.0.0.1 $PORT + $ echo hello world | nc 127.0.0.1 $PORT # Verify that the network connection worked - echo "Daemon received: $(sudo docker logs $JOB)" + $ echo "Daemon received: $(sudo docker logs $JOB)" ## Committing (saving) a container state @@ -163,10 +163,10 @@ will be stored (as a diff). See which images you already have using the `docker images` command. # Commit your container to a new named image - sudo docker commit + $ sudo docker commit # List your containers - sudo docker images + $ sudo docker images You now have a image state from which you can create new instances. 
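The same bind-and-lookup flow shown above can also be scripted; the following is only a rough Go sketch, assuming the `docker` binary is on `PATH`, that the current user can talk to the daemon without `sudo`, and the `ubuntu:12.10` image used in the example:

    package main

    import (
        "fmt"
        "log"
        "os/exec"
        "strings"
    )

    func main() {
        // Start netcat listening on port 4444 inside a detached container,
        // as in the "Bind a service on a TCP port" example.
        out, err := exec.Command("docker", "run", "-d", "-p", "4444",
            "ubuntu:12.10", "/bin/nc", "-l", "4444").Output()
        if err != nil {
            log.Fatalf("docker run: %s", err)
        }
        id := strings.TrimSpace(string(out))

        // Ask the daemon which host port was NATed to container port 4444.
        out, err = exec.Command("docker", "port", id, "4444").Output()
        if err != nil {
            log.Fatalf("docker port: %s", err)
        }
        mapping := strings.TrimSpace(string(out)) // e.g. "127.0.0.1:49160"
        parts := strings.Split(mapping, ":")
        fmt.Printf("container %s listens on host port %s\n", id, parts[len(parts)-1])
    }
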
diff --git a/docs/sources/use/chef.md b/docs/sources/use/chef.md index 5145107a38..897c2b429a 100644 --- a/docs/sources/use/chef.md +++ b/docs/sources/use/chef.md @@ -43,7 +43,7 @@ The next step is to pull a Docker image. For this, we have a resource: This is equivalent to running: - docker pull samalba/docker-registry + $ docker pull samalba/docker-registry There are attributes available to control how long the cookbook will allow for downloading (5 minute default). @@ -68,7 +68,7 @@ managed by Docker. This is equivalent to running the following command, but under upstart: - docker run --detach=true --publish='5000:5000' --env='SETTINGS_FLAVOR=local' --volume='/mnt/docker:/docker-storage' samalba/docker-registry + $ docker run --detach=true --publish='5000:5000' --env='SETTINGS_FLAVOR=local' --volume='/mnt/docker:/docker-storage' samalba/docker-registry The resources will accept a single string or an array of values for any docker flags that allow multiple values. diff --git a/docs/sources/use/networking.md b/docs/sources/use/networking.md index 2249ca42cd..00d0684256 100644 --- a/docs/sources/use/networking.md +++ b/docs/sources/use/networking.md @@ -84,7 +84,7 @@ In this scenario: inet addr:192.168.227.1 Bcast:192.168.227.255 Mask:255.255.255.0 # Run a container - $ docker run -i -t base /bin/bash + docker run -i -t base /bin/bash # Container IP in the 192.168.227/24 range root@261c272cd7d5:/# ifconfig eth0 diff --git a/docs/sources/use/port_redirection.md b/docs/sources/use/port_redirection.md index ef0e644ace..9f2ce98eae 100644 --- a/docs/sources/use/port_redirection.md +++ b/docs/sources/use/port_redirection.md @@ -11,7 +11,7 @@ port. When this service runs inside a container, one can connect to the port after finding the IP address of the container as follows: # Find IP address of container with ID - docker inspect | grep IPAddress | cut -d '"' -f 4 + $ docker inspect | grep IPAddress | cut -d '"' -f 4 However, this IP address is local to the host system and the container port is not reachable by the outside world. Furthermore, even if the @@ -40,7 +40,7 @@ To bind a port of the container to a specific interface of the host system, use the `-p` parameter of the `docker run` command: # General syntax - docker run -p [([:[host_port]])|():][/udp] + $ docker run -p [([:[host_port]])|():][/udp] When no host interface is provided, the port is bound to all available interfaces of the host machine (aka INADDR_ANY, or 0.0.0.0). When no @@ -48,32 +48,32 @@ host port is provided, one is dynamically allocated. The possible combinations of options for TCP port are the following: # Bind TCP port 8080 of the container to TCP port 80 on 127.0.0.1 of the host machine. - docker run -p 127.0.0.1:80:8080 + $ docker run -p 127.0.0.1:80:8080 # Bind TCP port 8080 of the container to a dynamically allocated TCP port on 127.0.0.1 of the host machine. - docker run -p 127.0.0.1::8080 + $ docker run -p 127.0.0.1::8080 # Bind TCP port 8080 of the container to TCP port 80 on all available interfaces of the host machine. - docker run -p 80:8080 + $ docker run -p 80:8080 # Bind TCP port 8080 of the container to a dynamically allocated TCP port on all available interfaces of the host machine. - docker run -p 8080 + $ docker run -p 8080 UDP ports can also be bound by adding a trailing `/udp`. All the combinations described for TCP work. Here is only one example: # Bind UDP port 5353 of the container to UDP port 53 on 127.0.0.1 of the host machine. 
- docker run -p 127.0.0.1:53:5353/udp + $ docker run -p 127.0.0.1:53:5353/udp The command `docker port` lists the interface and port on the host machine bound to a given container port. It is useful when using dynamically allocated ports: # Bind to a dynamically allocated port - docker run -p 127.0.0.1::8080 --name dyn-bound + $ docker run -p 127.0.0.1::8080 --name dyn-bound # Lookup the actual port - docker port dyn-bound 8080 + $ docker port dyn-bound 8080 127.0.0.1:49160 ## Linking a container @@ -99,24 +99,24 @@ exposure is done either through the `--expose` parameter to the `docker run` command, or the `EXPOSE` build command in a Dockerfile: # Expose port 80 - docker run --expose 80 --name server + $ docker run --expose 80 --name server The `client` then links to the `server`: # Link - docker run --name client --link server:linked-server + $ docker run --name client --link server:linked-server `client` locally refers to `server` as `linked-server`. The following environment variables, among others, are available on `client`: # The default protocol, ip, and port of the service running in the container - LINKED-SERVER_PORT=tcp://172.17.0.8:80 + $ LINKED-SERVER_PORT=tcp://172.17.0.8:80 # A specific protocol, ip, and port of various services - LINKED-SERVER_PORT_80_TCP=tcp://172.17.0.8:80 - LINKED-SERVER_PORT_80_TCP_PROTO=tcp - LINKED-SERVER_PORT_80_TCP_ADDR=172.17.0.8 - LINKED-SERVER_PORT_80_TCP_PORT=80 + $ LINKED-SERVER_PORT_80_TCP=tcp://172.17.0.8:80 + $ LINKED-SERVER_PORT_80_TCP_PROTO=tcp + $ LINKED-SERVER_PORT_80_TCP_ADDR=172.17.0.8 + $ LINKED-SERVER_PORT_80_TCP_PORT=80 This tells `client` that a service is running on port 80 of `server` and that `server` is accessible at the IP address 172.17.0.8 diff --git a/docs/sources/use/puppet.md b/docs/sources/use/puppet.md index c1ac95f4ab..a0d20ab446 100644 --- a/docs/sources/use/puppet.md +++ b/docs/sources/use/puppet.md @@ -23,7 +23,7 @@ The module is available on the [Puppet Forge](https://forge.puppetlabs.com/garethr/docker/) and can be installed using the built-in module tool. - puppet module install garethr/docker + $ puppet module install garethr/docker It can also be found on [GitHub](https://github.com/garethr/garethr-docker) if you would @@ -47,7 +47,7 @@ defined type which can be used like so: This is equivalent to running: - docker pull ubuntu + $ docker pull ubuntu Note that it will only be downloaded if an image of that name does not already exist. This is downloading a large binary so on first run can @@ -71,7 +71,7 @@ managed by Docker. This is equivalent to running the following command, but under upstart: - docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done" + $ docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done" Run also contains a number of optional parameters: diff --git a/docs/sources/use/working_with_volumes.md b/docs/sources/use/working_with_volumes.md index 5817309e62..c403532bcc 100644 --- a/docs/sources/use/working_with_volumes.md +++ b/docs/sources/use/working_with_volumes.md @@ -50,8 +50,8 @@ not. Or, you can use the VOLUME instruction in a Dockerfile to add one or more new volumes to any container created from that image: - # BUILD-USING: docker build -t data . - # RUN-USING: docker run -name DATA data + # BUILD-USING: $ docker build -t data . + # RUN-USING: $ docker run -name DATA data FROM busybox VOLUME ["/var/volume1", "/var/volume2"] CMD ["/bin/true"] @@ -108,7 +108,7 @@ For example: # Usage: # sudo docker run [OPTIONS] -v /(dir. on host):/(dir. 
in container):(Read-Write or Read-Only) [ARG..] # Example: - sudo docker run -i -t -v /var/log:/logs_from_host:ro ubuntu bash + $ sudo docker run -i -t -v /var/log:/logs_from_host:ro ubuntu bash The command above mounts the host directory `/var/log` into the container with *read only* permissions as `/logs_from_host`. diff --git a/docs/sources/use/workingwithrepository.md b/docs/sources/use/workingwithrepository.md index e3daf05fc7..07f130a909 100644 --- a/docs/sources/use/workingwithrepository.md +++ b/docs/sources/use/workingwithrepository.md @@ -109,7 +109,7 @@ share one of your own images, then you must register a unique user name first. You can create your username and login on [Docker.io](https://index.docker.io/account/signup/), or by running - sudo docker login + $ sudo docker login This will prompt you for a username, which will become a public namespace for your public repositories. @@ -199,10 +199,10 @@ identify a host), like this: # Tag to create a repository with the full registry location. # The location (e.g. localhost.localdomain:5000) becomes # a permanent part of the repository name - sudo docker tag 0u812deadbeef localhost.localdomain:5000/repo_name + $ sudo docker tag 0u812deadbeef localhost.localdomain:5000/repo_name # Push the new repository to its home location on localhost - sudo docker push localhost.localdomain:5000/repo_name + $ sudo docker push localhost.localdomain:5000/repo_name Once a repository has your registry's host name as part of the tag, you can push and pull it like any other repository, but it will **not** be From d1297feef8b124e69efc99a58294f498ecb8c022 Mon Sep 17 00:00:00 2001 From: Alexandr Morozov Date: Wed, 2 Apr 2014 23:26:06 +0400 Subject: [PATCH 135/219] Timestamps for docker logs. Fixes #1165 Docker-DCO-1.1-Signed-off-by: Alexandr Morozov (github: LK4D4) --- api/client/commands.go | 9 +- api/client/utils.go | 15 ++- api/server/server.go | 43 +++++++ daemon/container.go | 12 ++ .../reference/api/docker_remote_api.md | 4 + .../reference/api/docker_remote_api_v1.11.md | 36 ++++++ docs/sources/reference/commandline/cli.md | 7 +- integration-cli/docker_cli_logs_test.go | 95 +++++++++++++++ server/server.go | 91 ++++++++++++++ utils/utils.go | 112 +++++++++++++----- 10 files changed, 386 insertions(+), 38 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 415bddaac4..89f9b0a4c4 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -1583,6 +1583,7 @@ func (cli *DockerCli) CmdDiff(args ...string) error { func (cli *DockerCli) CmdLogs(args ...string) error { cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container") follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") + times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") if err := cmd.Parse(args); err != nil { return nil } @@ -1603,14 +1604,16 @@ func (cli *DockerCli) CmdLogs(args ...string) error { } v := url.Values{} - v.Set("logs", "1") v.Set("stdout", "1") v.Set("stderr", "1") + if *times { + v.Set("timestamps", "1") + } if *follow && container.State.Running { - v.Set("stream", "1") + v.Set("follow", "1") } - if err := cli.hijack("POST", "/containers/"+name+"/attach?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil { + if err := cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil { return err } return nil diff --git a/api/client/utils.go b/api/client/utils.go index 4ef09ba783..7f7498dee7 100644 --- 
a/api/client/utils.go +++ b/api/client/utils.go @@ -130,6 +130,10 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b } func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error { + return cli.streamHelper(method, path, true, in, out, nil, headers) +} + +func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in io.Reader, stdout, stderr io.Writer, headers map[string][]string) error { if (method == "POST" || method == "PUT") && in == nil { in = bytes.NewReader([]byte{}) } @@ -184,9 +188,16 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, h } if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") { - return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal) + return utils.DisplayJSONMessagesStream(resp.Body, stdout, cli.terminalFd, cli.isTerminal) } - if _, err := io.Copy(out, resp.Body); err != nil { + if stdout != nil || stderr != nil { + // When TTY is ON, use regular copy + if setRawTerminal { + _, err = io.Copy(stdout, resp.Body) + } else { + _, err = utils.StdCopy(stdout, stderr, resp.Body) + } + utils.Debugf("[stream] End of stdout") return err } return nil diff --git a/api/server/server.go b/api/server/server.go index 279d297965..5db9df1901 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -328,6 +328,48 @@ func getContainersJSON(eng *engine.Engine, version version.Version, w http.Respo return nil } +func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var ( + job = eng.Job("inspect", vars["name"], "container") + c, err = job.Stdout.AddEnv() + ) + if err != nil { + return err + } + if err = job.Run(); err != nil { + return err + } + + var outStream, errStream io.Writer + outStream = utils.NewWriteFlusher(w) + + if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { + errStream = utils.NewStdWriter(outStream, utils.Stderr) + outStream = utils.NewStdWriter(outStream, utils.Stdout) + } else { + errStream = outStream + } + + job = eng.Job("logs", vars["name"]) + job.Setenv("follow", r.Form.Get("follow")) + job.Setenv("stdout", r.Form.Get("stdout")) + job.Setenv("stderr", r.Form.Get("stderr")) + job.Setenv("timestamps", r.Form.Get("timestamps")) + job.Stdout.Add(outStream) + job.Stderr.Set(errStream) + if err := job.Run(); err != nil { + fmt.Fprintf(outStream, "Error: %s\n", err) + } + return nil +} + func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err @@ -1017,6 +1059,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st "/containers/{name:.*}/changes": getContainersChanges, "/containers/{name:.*}/json": getContainersByName, "/containers/{name:.*}/top": getContainersTop, + "/containers/{name:.*}/logs": getContainersLogs, "/containers/{name:.*}/attach/ws": wsContainersAttach, }, "POST": { diff --git a/daemon/container.go b/daemon/container.go index 5e4b72bf12..1c6dc077dc 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -473,6 +473,18 @@ func (container *Container) StderrPipe() (io.ReadCloser, error) { return utils.NewBufReader(reader), nil } 
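    // StdoutLogPipe and StderrLogPipe attach one more pipe writer to the
    // container's stdout/stderr broadcasters; the "logs" job uses them when
    // follow is requested, so output produced after the request is streamed
    // to the client as it arrives.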
+func (container *Container) StdoutLogPipe() io.ReadCloser { + reader, writer := io.Pipe() + container.stdout.AddWriter(writer, "stdout") + return utils.NewBufReader(reader) +} + +func (container *Container) StderrLogPipe() io.ReadCloser { + reader, writer := io.Pipe() + container.stderr.AddWriter(writer, "stderr") + return utils.NewBufReader(reader) +} + func (container *Container) buildHostnameAndHostsFiles(IP string) { container.HostnamePath = path.Join(container.root, "hostname") ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index a6aafbeee8..d6c25c75f2 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -45,6 +45,10 @@ You can still call an old version of the api using You can now use the `-until` parameter to close connection after timestamp. +`GET /containers/(id)/logs` + +This url is prefered method for getting container logs now. + ### v1.10 #### Full Documentation diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index 40dee1af63..ddff4e19d0 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -300,6 +300,42 @@ List processes running inside the container `id` - **404** – no such container - **500** – server error +### Get container logs + +`GET /containers/(id)/logs` + +Get stdout and stderr logs from the container ``id`` + + **Example request**: + + GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1 HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {{ STREAM }} + + Query Parameters: + +   + + - **follow** – 1/True/true or 0/False/false, return stream. + Default false + - **stdout** – 1/True/true or 0/False/false, if logs=true, return + stdout log. Default false + - **stderr** – 1/True/true or 0/False/false, if logs=true, return + stderr log. Default false + - **timestamps** – 1/True/true or 0/False/false, if logs=true, print + timestamps for every log line. Default false + + Status Codes: + + - **200** – no error + - **404** – no such container + - **500** – server error + ### Inspect changes on a container's filesystem `GET /containers/(id)/changes` diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 75a5be33b6..49e5860ea9 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -649,13 +649,14 @@ Fetch the logs of a container Usage: docker logs [OPTIONS] CONTAINER -f, --follow=false: Follow log output + -t, --timestamps=false: Show timestamps The `docker logs` command batch-retrieves all logs present at the time of execution. -The `docker logs --follow` command combines `docker logs` and `docker -attach`: it will first return all logs from the beginning and then -continue streaming new output from the container'sstdout and stderr. +The ``docker logs --follow`` command will first return all logs from the +beginning and then continue streaming new output from the container's stdout +and stderr. 
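Any HTTP client can request the new logs stream directly. The sketch below is an illustration only: it assumes a daemon listening on 192.168.1.9:4243 (as in the CORS example) and the container ID 4fa6e0f0c678 from the request sample, and simply copies the raw stream to stdout:

    package main

    import (
        "io"
        "log"
        "net/http"
        "os"
    )

    func main() {
        // Request stdout and stderr with timestamps, and keep following new output.
        url := "http://192.168.1.9:4243/containers/4fa6e0f0c678/logs" +
            "?stdout=1&stderr=1&timestamps=1&follow=1"
        resp, err := http.Get(url)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        // For containers started without a TTY the body is the multiplexed
        // raw stream; a real client would split it into stdout and stderr
        // the way the CLI does with utils.StdCopy.
        if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
            log.Fatal(err)
        }
    }
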
## port diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go index 8fcf4d7333..75235b6bb8 100644 --- a/integration-cli/docker_cli_logs_test.go +++ b/integration-cli/docker_cli_logs_test.go @@ -3,7 +3,10 @@ package main import ( "fmt" "os/exec" + "regexp" + "strings" "testing" + "time" ) // This used to work, it test a log of PageSize-1 (gh#4851) @@ -74,3 +77,95 @@ func TestLogsContainerMuchBiggerThanPage(t *testing.T) { logDone("logs - logs container running echo much bigger than page size") } + +func TestLogsTimestamps(t *testing.T) { + testLen := 100 + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", "-t", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + lines := strings.Split(out, "\n") + + if len(lines) != testLen+1 { + t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) + } + + ts := regexp.MustCompile(`^\[.*?\]`) + + for _, l := range lines { + if l != "" { + _, err := time.Parse("["+time.StampMilli+"]", ts.FindString(l)) + if err != nil { + t.Fatalf("Failed to parse timestamp from %v: %v", l, err) + } + } + } + + deleteContainer(cleanedContainerID) + + logDone("logs - logs with timestamps") +} + +func TestLogsSeparateStderr(t *testing.T) { + msg := "stderr_log" + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if stdout != "" { + t.Fatalf("Expected empty stdout stream, got %v", stdout) + } + + stderr = strings.TrimSpace(stderr) + if stderr != msg { + t.Fatalf("Expected %v in stderr stream, got %v", msg, stderr) + } + + deleteContainer(cleanedContainerID) + + logDone("logs - separate stderr (without pseudo-tty)") +} + +func TestLogsStderrInStdout(t *testing.T) { + msg := "stderr_log" + runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) + + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if stderr != "" { + t.Fatalf("Expected empty stderr stream, got %v", stdout) + } + + stdout = strings.TrimSpace(stdout) + if stdout != msg { + t.Fatalf("Expected %v in stdout stream, got %v", msg, stdout) + } + + deleteContainer(cleanedContainerID) + + logDone("logs - stderr in stdout (with 
pseudo-tty)") +} diff --git a/server/server.go b/server/server.go index 51dd24b3fe..ea3487bf88 100644 --- a/server/server.go +++ b/server/server.go @@ -124,6 +124,7 @@ func InitServer(job *engine.Job) engine.Status { "container_copy": srv.ContainerCopy, "insert": srv.ImageInsert, "attach": srv.ContainerAttach, + "logs": srv.ContainerLogs, "search": srv.ImagesSearch, "changes": srv.ContainerChanges, "top": srv.ContainerTop, @@ -2252,6 +2253,96 @@ func (srv *Server) ContainerResize(job *engine.Job) engine.Status { return job.Errorf("No such container: %s", name) } +func (srv *Server) ContainerLogs(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + + var ( + name = job.Args[0] + stdout = job.GetenvBool("stdout") + stderr = job.GetenvBool("stderr") + follow = job.GetenvBool("follow") + times = job.GetenvBool("timestamps") + format string + ) + if !(stdout || stderr) { + return job.Errorf("You must choose at least one stream") + } + if times { + format = time.StampMilli + } + container := srv.daemon.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + cLog, err := container.ReadLog("json") + if err != nil && os.IsNotExist(err) { + // Legacy logs + utils.Debugf("Old logs format") + if stdout { + cLog, err := container.ReadLog("stdout") + if err != nil { + utils.Errorf("Error reading logs (stdout): %s", err) + } else if _, err := io.Copy(job.Stdout, cLog); err != nil { + utils.Errorf("Error streaming logs (stdout): %s", err) + } + } + if stderr { + cLog, err := container.ReadLog("stderr") + if err != nil { + utils.Errorf("Error reading logs (stderr): %s", err) + } else if _, err := io.Copy(job.Stderr, cLog); err != nil { + utils.Errorf("Error streaming logs (stderr): %s", err) + } + } + } else if err != nil { + utils.Errorf("Error reading logs (json): %s", err) + } else { + dec := json.NewDecoder(cLog) + for { + l := &utils.JSONLog{} + + if err := dec.Decode(l); err == io.EOF { + break + } else if err != nil { + utils.Errorf("Error streaming logs: %s", err) + break + } + logLine := l.Log + if times { + logLine = fmt.Sprintf("[%s] %s", l.Created.Format(format), logLine) + } + if l.Stream == "stdout" && stdout { + fmt.Fprintf(job.Stdout, "%s", logLine) + } + if l.Stream == "stderr" && stderr { + fmt.Fprintf(job.Stderr, "%s", logLine) + } + } + } + if follow { + errors := make(chan error, 2) + if stdout { + stdoutPipe := container.StdoutLogPipe() + go func() { + errors <- utils.WriteLog(stdoutPipe, job.Stdout, format) + }() + } + if stderr { + stderrPipe := container.StderrLogPipe() + go func() { + errors <- utils.WriteLog(stderrPipe, job.Stderr, format) + }() + } + err := <-errors + if err != nil { + utils.Errorf("%s", err) + } + } + return engine.StatusOK +} + func (srv *Server) ContainerAttach(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s CONTAINER\n", job.Name) diff --git a/utils/utils.go b/utils/utils.go index 8b6db8c464..066cfbac5a 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -341,18 +341,15 @@ func (r *bufReader) Close() error { type WriteBroadcaster struct { sync.Mutex buf *bytes.Buffer - writers map[StreamWriter]bool -} - -type StreamWriter struct { - wc io.WriteCloser - stream string + streams map[string](map[io.WriteCloser]struct{}) } func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser, stream string) { w.Lock() - sw := StreamWriter{wc: writer, stream: stream} - w.writers[sw] = true + if _, ok := w.streams[stream]; !ok { + 
w.streams[stream] = make(map[io.WriteCloser]struct{}) + } + w.streams[stream][writer] = struct{}{} w.Unlock() } @@ -362,33 +359,83 @@ type JSONLog struct { Created time.Time `json:"time"` } +func (jl *JSONLog) Format(format string) (string, error) { + if format == "" { + return jl.Log, nil + } + if format == "json" { + m, err := json.Marshal(jl) + return string(m), err + } + return fmt.Sprintf("[%s] %s", jl.Created.Format(format), jl.Log), nil +} + +func WriteLog(src io.Reader, dst io.WriteCloser, format string) error { + dec := json.NewDecoder(src) + for { + l := &JSONLog{} + + if err := dec.Decode(l); err == io.EOF { + return nil + } else if err != nil { + Errorf("Error streaming logs: %s", err) + return err + } + line, err := l.Format(format) + if err != nil { + return err + } + fmt.Fprintf(dst, "%s", line) + } +} + +type LogFormatter struct { + wc io.WriteCloser + timeFormat string +} + func (w *WriteBroadcaster) Write(p []byte) (n int, err error) { + created := time.Now().UTC() w.Lock() defer w.Unlock() + if writers, ok := w.streams[""]; ok { + for sw := range writers { + if n, err := sw.Write(p); err != nil || n != len(p) { + // On error, evict the writer + delete(writers, sw) + } + } + } w.buf.Write(p) - for sw := range w.writers { - lp := p - if sw.stream != "" { - lp = nil - for { - line, err := w.buf.ReadString('\n') + lines := []string{} + for { + line, err := w.buf.ReadString('\n') + if err != nil { + w.buf.Write([]byte(line)) + break + } + lines = append(lines, line) + } + + if len(lines) != 0 { + for stream, writers := range w.streams { + if stream == "" { + continue + } + var lp []byte + for _, line := range lines { + b, err := json.Marshal(&JSONLog{Log: line, Stream: stream, Created: created}) if err != nil { - w.buf.Write([]byte(line)) - break - } - b, err := json.Marshal(&JSONLog{Log: line, Stream: sw.stream, Created: time.Now().UTC()}) - if err != nil { - // On error, evict the writer - delete(w.writers, sw) - continue + Errorf("Error making JSON log line: %s", err) } lp = append(lp, b...) lp = append(lp, '\n') } - } - if n, err := sw.wc.Write(lp); err != nil || n != len(lp) { - // On error, evict the writer - delete(w.writers, sw) + for sw := range writers { + if _, err := sw.Write(lp); err != nil { + delete(writers, sw) + } + } } } return len(p), nil @@ -397,15 +444,20 @@ func (w *WriteBroadcaster) Write(p []byte) (n int, err error) { func (w *WriteBroadcaster) CloseWriters() error { w.Lock() defer w.Unlock() - for sw := range w.writers { - sw.wc.Close() + for _, writers := range w.streams { + for w := range writers { + w.Close() + } } - w.writers = make(map[StreamWriter]bool) + w.streams = make(map[string](map[io.WriteCloser]struct{})) return nil } func NewWriteBroadcaster() *WriteBroadcaster { - return &WriteBroadcaster{writers: make(map[StreamWriter]bool), buf: bytes.NewBuffer(nil)} + return &WriteBroadcaster{ + streams: make(map[string](map[io.WriteCloser]struct{})), + buf: bytes.NewBuffer(nil), + } } func GetTotalUsedFds() int { From 24f9187a0467ca66c30e26c3d9e3ee58daeb720f Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Mon, 31 Mar 2014 11:06:39 +0200 Subject: [PATCH 136/219] beam: Add simple framing system for UnixConn This is needed for Send/Recieve to correctly handle borders between the messages. The framing uses a single 32bit uint32 length for each frame, of which the high bit is used to indicate whether the message contains a file descriptor or not. 
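A standalone sketch of that header layout (the payload length in the low 31 bits of a big-endian uint32, the high bit flagging an attached file descriptor), mirroring the makeHeader/parseHeader helpers below and assuming frames no larger than 0x7fffffff bytes:

    package main

    import "fmt"

    // encodeHeader packs the payload length and an "fd attached" flag into
    // the 4-byte big-endian header; the high bit carries the flag.
    func encodeHeader(length uint32, hasFd bool) [4]byte {
        if hasFd {
            length |= 0x80000000
        }
        return [4]byte{byte(length >> 24), byte(length >> 16), byte(length >> 8), byte(length)}
    }

    // decodeHeader recovers the payload length and the flag.
    func decodeHeader(h [4]byte) (uint32, bool) {
        v := uint32(h[0])<<24 | uint32(h[1])<<16 | uint32(h[2])<<8 | uint32(h[3])
        return v &^ 0x80000000, v&0x80000000 != 0
    }

    func main() {
        h := encodeHeader(1500, true)
        length, hasFd := decodeHeader(h)
        fmt.Printf("header % x -> length=%d hasFd=%v\n", h, length, hasFd)
    }
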
This is enough to separate out each message sent and to decide to which message each file descriptors belongs, even though multiple Sends may be coalesced into a single read, and/or one Send can be split into multiple writes. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- pkg/beam/unix.go | 166 ++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 136 insertions(+), 30 deletions(-) diff --git a/pkg/beam/unix.go b/pkg/beam/unix.go index b480c47eb9..b2d0d94150 100644 --- a/pkg/beam/unix.go +++ b/pkg/beam/unix.go @@ -21,6 +21,43 @@ func debugCheckpoint(msg string, args ...interface{}) { type UnixConn struct { *net.UnixConn + fds []*os.File +} + +// Framing: +// In order to handle framing in Send/Recieve, as these give frame +// boundaries we use a very simple 4 bytes header. It is a big endiand +// uint32 where the high bit is set if the message includes a file +// descriptor. The rest of the uint32 is the length of the next frame. +// We need the bit in order to be able to assign recieved fds to +// the right message, as multiple messages may be coalesced into +// a single recieve operation. +func makeHeader(data []byte, fds []int) ([]byte, error) { + header := make([]byte, 4) + + length := uint32(len(data)) + + if length > 0x7fffffff { + return nil, fmt.Errorf("Data to large") + } + + if len(fds) != 0 { + length = length | 0x80000000 + } + header[0] = byte((length >> 24) & 0xff) + header[1] = byte((length >> 16) & 0xff) + header[2] = byte((length >> 8) & 0xff) + header[3] = byte((length >> 0) & 0xff) + + return header, nil +} + +func parseHeader(header []byte) (uint32, bool) { + length := uint32(header[0])<<24 | uint32(header[1])<<16 | uint32(header[2])<<8 | uint32(header[3]) + hasFd := length&0x80000000 != 0 + length = length & ^uint32(0x80000000) + + return length, hasFd } func FileConn(f *os.File) (*UnixConn, error) { @@ -33,7 +70,7 @@ func FileConn(f *os.File) (*UnixConn, error) { conn.Close() return nil, fmt.Errorf("%d: not a unix connection", f.Fd()) } - return &UnixConn{uconn}, nil + return &UnixConn{UnixConn: uconn}, nil } @@ -52,7 +89,7 @@ func (conn *UnixConn) Send(data []byte, f *os.File) error { if f != nil { fds = append(fds, int(f.Fd())) } - if err := sendUnix(conn.UnixConn, data, fds...); err != nil { + if err := conn.sendUnix(data, fds...); err != nil { return err } @@ -76,42 +113,104 @@ func (conn *UnixConn) Receive() (rdata []byte, rf *os.File, rerr error) { } debugCheckpoint("===DEBUG=== Receive() -> '%s'[%d]. 
Hit enter to continue.\n", rdata, fd) }() - for { - data, fds, err := receiveUnix(conn.UnixConn) + + // Read header + header := make([]byte, 4) + nRead := uint32(0) + + for nRead < 4 { + n, err := conn.receiveUnix(header[nRead:]) if err != nil { return nil, nil, err } - var f *os.File - if len(fds) > 1 { - for _, fd := range fds[1:] { - syscall.Close(fd) - } - } - if len(fds) >= 1 { - f = os.NewFile(uintptr(fds[0]), "") - } - return data, f, nil + nRead = nRead + uint32(n) } - panic("impossibru") - return nil, nil, nil + + length, hasFd := parseHeader(header) + + if hasFd { + if len(conn.fds) == 0 { + return nil, nil, fmt.Errorf("No expected file descriptor in message") + } + + rf = conn.fds[0] + conn.fds = conn.fds[1:] + } + + rdata = make([]byte, length) + + nRead = 0 + for nRead < length { + n, err := conn.receiveUnix(rdata[nRead:]) + if err != nil { + return nil, nil, err + } + nRead = nRead + uint32(n) + } + + return } -func receiveUnix(conn *net.UnixConn) ([]byte, []int, error) { - buf := make([]byte, 4096) - oob := make([]byte, 4096) +func (conn *UnixConn) receiveUnix(buf []byte) (int, error) { + oob := make([]byte, syscall.CmsgSpace(4)) bufn, oobn, _, _, err := conn.ReadMsgUnix(buf, oob) if err != nil { - return nil, nil, err + return 0, err } - return buf[:bufn], extractFds(oob[:oobn]), nil + fd := extractFd(oob[:oobn]) + if fd != -1 { + f := os.NewFile(uintptr(fd), "") + conn.fds = append(conn.fds, f) + } + + return bufn, nil } -func sendUnix(conn *net.UnixConn, data []byte, fds ...int) error { - _, _, err := conn.WriteMsgUnix(data, syscall.UnixRights(fds...), nil) - return err +func (conn *UnixConn) sendUnix(data []byte, fds ...int) error { + header, err := makeHeader(data, fds) + if err != nil { + return err + } + + // There is a bug in conn.WriteMsgUnix where it doesn't correctly return + // the number of bytes writte (http://code.google.com/p/go/issues/detail?id=7645) + // So, we can't rely on the return value from it. However, we must use it to + // send the fds. In order to handle this we only write one byte using WriteMsgUnix + // (when we have to), as that can only ever block or fully suceed. We then write + // the rest with conn.Write() + // The reader side should not rely on this though, as hopefully this gets fixed + // in go later. + written := 0 + if len(fds) != 0 { + oob := syscall.UnixRights(fds...) + wrote, _, err := conn.WriteMsgUnix(header[0:1], oob, nil) + if err != nil { + return err + } + written = written + wrote + } + + for written < len(header) { + wrote, err := conn.Write(header[written:]) + if err != nil { + return err + } + written = written + wrote + } + + written = 0 + for written < len(data) { + wrote, err := conn.Write(data[written:]) + if err != nil { + return err + } + written = written + wrote + } + + return nil } -func extractFds(oob []byte) (fds []int) { +func extractFd(oob []byte) int { // Grab forklock to make sure no forks accidentally inherit the new // fds before they are made CLOEXEC // There is a slight race condition between ReadMsgUnix returns and @@ -122,20 +221,27 @@ func extractFds(oob []byte) (fds []int) { defer syscall.ForkLock.Unlock() scms, err := syscall.ParseSocketControlMessage(oob) if err != nil { - return + return -1 } + + foundFd := -1 for _, scm := range scms { - gotFds, err := syscall.ParseUnixRights(&scm) + fds, err := syscall.ParseUnixRights(&scm) if err != nil { continue } - fds = append(fds, gotFds...) 
for _, fd := range fds { - syscall.CloseOnExec(fd) + if foundFd == -1 { + syscall.CloseOnExec(fd) + foundFd = fd + } else { + syscall.Close(fd) + } } } - return + + return foundFd } func socketpair() ([2]int, error) { From cac0cea03f85191b3d92cdaeae827fdd93fb1b29 Mon Sep 17 00:00:00 2001 From: Eiichi Tsukata Date: Wed, 30 Apr 2014 15:20:22 +0900 Subject: [PATCH 137/219] drop CAP_SYSLOG capability Kernel capabilities for privileged syslog operations are currently splitted into CAP_SYS_ADMIN and CAP_SYSLOG since the following commit: http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=ce6ada35bdf710d16582cc4869c26722547e6f11 This patch drops CAP_SYSLOG to prevent containers from messing with host's syslog (e.g. `dmesg -c` clears up host's printk ring buffer). Closes #5491 Docker-DCO-1.1-Signed-off-by: Eiichi Tsukata (github: Etsukata) Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/execdriver/lxc/init.go | 1 + daemon/execdriver/native/template/default_template.go | 1 + pkg/libcontainer/container.json | 5 +++++ pkg/libcontainer/types.go | 1 + 4 files changed, 8 insertions(+) diff --git a/daemon/execdriver/lxc/init.go b/daemon/execdriver/lxc/init.go index 324bd5eff7..52d75fc9f8 100644 --- a/daemon/execdriver/lxc/init.go +++ b/daemon/execdriver/lxc/init.go @@ -149,6 +149,7 @@ func setupCapabilities(args *execdriver.InitArgs) error { capability.CAP_MAC_OVERRIDE, capability.CAP_MAC_ADMIN, capability.CAP_NET_ADMIN, + capability.CAP_SYSLOG, } c, err := capability.NewPid(os.Getpid()) diff --git a/daemon/execdriver/native/template/default_template.go b/daemon/execdriver/native/template/default_template.go index c354637fcb..5dbe21ecb0 100644 --- a/daemon/execdriver/native/template/default_template.go +++ b/daemon/execdriver/native/template/default_template.go @@ -25,6 +25,7 @@ func New() *libcontainer.Container { libcontainer.GetCapability("MAC_ADMIN"), libcontainer.GetCapability("NET_ADMIN"), libcontainer.GetCapability("MKNOD"), + libcontainer.GetCapability("SYSLOG"), }, Namespaces: libcontainer.Namespaces{ libcontainer.GetNamespace("NEWNS"), diff --git a/pkg/libcontainer/container.json b/pkg/libcontainer/container.json index f15a49ab05..20c1121911 100644 --- a/pkg/libcontainer/container.json +++ b/pkg/libcontainer/container.json @@ -91,6 +91,11 @@ "value" : 27, "key" : "MKNOD", "enabled" : true + }, + { + "value" : 34, + "key" : "SYSLOG", + "enabled" : false } ], "networks" : [ diff --git a/pkg/libcontainer/types.go b/pkg/libcontainer/types.go index ade3c32f1d..f5fe6cffa9 100644 --- a/pkg/libcontainer/types.go +++ b/pkg/libcontainer/types.go @@ -53,6 +53,7 @@ var ( {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE, Enabled: false}, {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN, Enabled: false}, {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN, Enabled: false}, + {Key: "SYSLOG", Value: capability.CAP_SYSLOG, Enabled: false}, } ) From fa1e390cad4fd36683e9667795967c711a4867e3 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 1 May 2014 20:51:16 +0000 Subject: [PATCH 138/219] add apparmor to the Dockerfile Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index be2233ff87..bd9f415f2d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -47,6 +47,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ ruby1.9.1 \ ruby1.9.1-dev \ s3cmd=1.1.0* \ + apparmor \ --no-install-recommends # Get and compile LXC 0.8 (since it is the 
most stable) From ae686c0486cf6e2c0c394c5eb7a26e7d59cf1472 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 1 May 2014 21:49:53 +0000 Subject: [PATCH 139/219] Revert "add apparmor to the Dockerfile" This reverts commit fa1e390cad4fd36683e9667795967c711a4867e3. Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- Dockerfile | 1 - 1 file changed, 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index bd9f415f2d..be2233ff87 100644 --- a/Dockerfile +++ b/Dockerfile @@ -47,7 +47,6 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ ruby1.9.1 \ ruby1.9.1-dev \ s3cmd=1.1.0* \ - apparmor \ --no-install-recommends # Get and compile LXC 0.8 (since it is the most stable) From de191e86321f7d3136ff42ff75826b8107399497 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 1 May 2014 21:52:29 +0000 Subject: [PATCH 140/219] skip apparmor with dind Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- hack/dind | 3 +++ pkg/apparmor/apparmor.go | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hack/dind b/hack/dind index e3641a342f..d4731aff75 100755 --- a/hack/dind +++ b/hack/dind @@ -9,6 +9,9 @@ # Usage: dind CMD [ARG...] +# apparmor sucks and Docker needs to know that it's in a container (c) @tianon +export container=docker + # First, make sure that cgroups are mounted correctly. CGROUP=/sys/fs/cgroup diff --git a/pkg/apparmor/apparmor.go b/pkg/apparmor/apparmor.go index 0987398124..6fdb1f8958 100644 --- a/pkg/apparmor/apparmor.go +++ b/pkg/apparmor/apparmor.go @@ -13,7 +13,7 @@ import ( ) func IsEnabled() bool { - if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil { + if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" { buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") return err == nil && len(buf) > 1 && buf[0] == 'Y' } From 1c4202a6142d238d41f10deff1f0548f7591350b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Petazzoni?= Date: Wed, 30 Apr 2014 18:00:42 -0700 Subject: [PATCH 141/219] Mount /proc and /sys read-only, except in privileged containers. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It has been pointed out that some files in /proc and /sys can be used to break out of containers. However, if those filesystems are mounted read-only, most of the known exploits are mitigated, since they rely on writing some file in those filesystems. This does not replace security modules (like SELinux or AppArmor), it is just another layer of security. Likewise, it doesn't mean that the other mitigations (shadowing parts of /proc or /sys with bind mounts) are useless. Those measures are still useful. As such, the shadowing of /proc/kcore is still enabled with both LXC and native drivers. Special care has to be taken with /proc/1/attr, which still needs to be mounted read-write in order to enable the AppArmor profile. It is bind-mounted from a private read-write mount of procfs. All that enforcement is done in dockerinit. The code doing the real work is in libcontainer. The init function for the LXC driver calls the function from libcontainer to avoid code duplication. 
Docker-DCO-1.1-Signed-off-by: Jérôme Petazzoni (github: jpetazzo) --- daemon/execdriver/lxc/driver.go | 5 ++ daemon/execdriver/lxc/lxc_template.go | 23 +++-- daemon/execdriver/native/create.go | 2 - integration-cli/docker_cli_run_test.go | 38 ++++++-- pkg/libcontainer/mount/init.go | 12 +-- pkg/libcontainer/nsinit/init.go | 14 ++- .../security/restrict/restrict.go | 86 ++++++++++++------- 7 files changed, 113 insertions(+), 67 deletions(-) diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index 6ee7f3c1dd..3fe44202ac 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -5,6 +5,7 @@ import ( "github.com/dotcloud/docker/daemon/execdriver" "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/label" + "github.com/dotcloud/docker/pkg/libcontainer/security/restrict" "github.com/dotcloud/docker/pkg/system" "github.com/dotcloud/docker/utils" "io/ioutil" @@ -35,6 +36,10 @@ func init() { return err } + if err := restrict.Restrict("/", "/empty"); err != nil { + return err + } + if err := setupCapabilities(args); err != nil { return err } diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go index bc94e7a19d..03d32e72b5 100644 --- a/daemon/execdriver/lxc/lxc_template.go +++ b/daemon/execdriver/lxc/lxc_template.go @@ -82,15 +82,12 @@ lxc.pivotdir = lxc_putold # NOTICE: These mounts must be applied within the namespace -# WARNING: procfs is a known attack vector and should probably be disabled -# if your userspace allows it. eg. see http://blog.zx2c4.com/749 +# WARNING: mounting procfs and/or sysfs read-write is a known attack vector. +# See e.g. http://blog.zx2c4.com/749 and http://bit.ly/T9CkqJ +# We mount them read-write here, but later, dockerinit will call the Restrict() function to remount them read-only. +# We cannot mount them directly read-only, because that would prevent loading AppArmor profiles. lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0 - -# WARNING: sysfs is a known attack vector and should probably be disabled -# if your userspace allows it. eg. see http://bit.ly/T9CkqJ -{{if .Privileged}} lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0 -{{end}} {{if .Tty}} lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0 @@ -111,14 +108,14 @@ lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabS {{if .AppArmor}} lxc.aa_profile = unconfined {{else}} -# not unconfined +# Let AppArmor normal confinement take place (i.e., not unconfined) {{end}} {{else}} -# restrict access to proc -lxc.mount.entry = {{.RestrictionSource}} {{escapeFstabSpaces $ROOTFS}}/proc/sys none bind,ro 0 0 -lxc.mount.entry = {{.RestrictionSource}} {{escapeFstabSpaces $ROOTFS}}/proc/irq none bind,ro 0 0 -lxc.mount.entry = {{.RestrictionSource}} {{escapeFstabSpaces $ROOTFS}}/proc/acpi none bind,ro 0 0 -lxc.mount.entry = {{escapeFstabSpaces $ROOTFS}}/dev/null {{escapeFstabSpaces $ROOTFS}}/proc/sysrq-trigger none bind,ro 0 0 +# Restrict access to some stuff in /proc. Note that /proc is already mounted +# read-only, so we don't need to bother about things that are just dangerous +# to write to (like sysrq-trigger). Also, recent kernels won't let a container +# peek into /proc/kcore, but let's cater for people who might run Docker on +# older kernels. Just in case. 
lxc.mount.entry = {{escapeFstabSpaces $ROOTFS}}/dev/null {{escapeFstabSpaces $ROOTFS}}/proc/kcore none bind,ro 0 0 {{end}} diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index 00e6fc4b26..6f663f916e 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -84,8 +84,6 @@ func (d *driver) setPrivileged(container *libcontainer.Container) error { } container.Cgroups.DeviceAccess = true - // add sysfs as a mount for privileged containers - container.Mounts = append(container.Mounts, libcontainer.Mount{Type: "sysfs"}) delete(container.Context, "restriction_path") if apparmor.IsEnabled() { diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 83867267ae..b9737feeea 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -725,24 +725,46 @@ func TestUnPrivilegedCannotMount(t *testing.T) { logDone("run - test un-privileged cannot mount") } -func TestSysNotAvaliableInNonPrivilegedContainers(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "busybox", "ls", "/sys/kernel") +func TestSysNotWritableInNonPrivilegedContainers(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "touch", "/sys/kernel/profiling") if code, err := runCommand(cmd); err == nil || code == 0 { - t.Fatal("sys should not be available in a non privileged container") + t.Fatal("sys should not be writable in a non privileged container") } deleteAllContainers() - logDone("run - sys not avaliable in non privileged container") + logDone("run - sys not writable in non privileged container") } -func TestSysAvaliableInPrivilegedContainers(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "ls", "/sys/kernel") +func TestSysWritableInPrivilegedContainers(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "touch", "/sys/kernel/profiling") if code, err := runCommand(cmd); err != nil || code != 0 { - t.Fatalf("sys should be available in privileged container") + t.Fatalf("sys should be writable in privileged container") } deleteAllContainers() - logDone("run - sys avaliable in privileged container") + logDone("run - sys writable in privileged container") +} + +func TestProcNotWritableInNonPrivilegedContainers(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "busybox", "touch", "/proc/sysrq-trigger") + if code, err := runCommand(cmd); err == nil || code == 0 { + t.Fatal("proc should not be writable in a non privileged container") + } + + deleteAllContainers() + + logDone("run - proc not writable in non privileged container") +} + +func TestProcWritableInPrivilegedContainers(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "touch", "/proc/sysrq-trigger") + if code, err := runCommand(cmd); err != nil || code != 0 { + t.Fatalf("proc should be writable in privileged container") + } + + deleteAllContainers() + + logDone("run - proc writable in privileged container") } diff --git a/pkg/libcontainer/mount/init.go b/pkg/libcontainer/mount/init.go index 735970cded..cc3ce2158e 100644 --- a/pkg/libcontainer/mount/init.go +++ b/pkg/libcontainer/mount/init.go @@ -11,7 +11,6 @@ import ( "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/mount/nodes" - "github.com/dotcloud/docker/pkg/libcontainer/security/restrict" "github.com/dotcloud/docker/pkg/system" ) @@ -51,11 +50,6 @@ func 
InitializeMountNamespace(rootfs, console string, container *libcontainer.Co if err := nodes.CopyN(rootfs, nodes.DefaultNodes); err != nil { return fmt.Errorf("copy dev nodes %s", err) } - if restrictionPath := container.Context["restriction_path"]; restrictionPath != "" { - if err := restrict.Restrict(rootfs, restrictionPath); err != nil { - return fmt.Errorf("restrict %s", err) - } - } if err := SetupPtmx(rootfs, console, container.Context["mount_label"]); err != nil { return err } @@ -124,10 +118,11 @@ func setupBindmounts(rootfs string, bindMounts libcontainer.Mounts) error { } // TODO: this is crappy right now and should be cleaned up with a better way of handling system and -// standard bind mounts allowing them to be more dymanic +// standard bind mounts allowing them to be more dynamic func newSystemMounts(rootfs, mountLabel string, mounts libcontainer.Mounts) []mount { systemMounts := []mount{ {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, + {source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags}, } if len(mounts.OfType("devtmpfs")) == 1 { @@ -138,8 +133,5 @@ func newSystemMounts(rootfs, mountLabel string, mounts libcontainer.Mounts) []mo mount{source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, ) - if len(mounts.OfType("sysfs")) == 1 { - systemMounts = append(systemMounts, mount{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags}) - } return systemMounts } diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index faec12af32..bafb877cd9 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -16,6 +16,7 @@ import ( "github.com/dotcloud/docker/pkg/libcontainer/mount" "github.com/dotcloud/docker/pkg/libcontainer/network" "github.com/dotcloud/docker/pkg/libcontainer/security/capabilities" + "github.com/dotcloud/docker/pkg/libcontainer/security/restrict" "github.com/dotcloud/docker/pkg/libcontainer/utils" "github.com/dotcloud/docker/pkg/system" "github.com/dotcloud/docker/pkg/user" @@ -68,18 +69,25 @@ func Init(container *libcontainer.Container, uncleanRootfs, consolePath string, if err := system.Sethostname(container.Hostname); err != nil { return fmt.Errorf("sethostname %s", err) } - if err := FinalizeNamespace(container); err != nil { - return fmt.Errorf("finalize namespace %s", err) - } runtime.LockOSThread() + if restrictionPath := container.Context["restriction_path"]; restrictionPath != "" { + if err := restrict.Restrict("/", restrictionPath); err != nil { + return err + } + } + if err := apparmor.ApplyProfile(os.Getpid(), container.Context["apparmor_profile"]); err != nil { return err } if err := label.SetProcessLabel(container.Context["process_label"]); err != nil { return fmt.Errorf("set process label %s", err) } + + if err := FinalizeNamespace(container); err != nil { + return fmt.Errorf("finalize namespace %s", err) + } return system.Execv(args[0], args[0:], container.Env) } diff --git a/pkg/libcontainer/security/restrict/restrict.go b/pkg/libcontainer/security/restrict/restrict.go index 291d6ca5dc..8c08ea1806 100644 --- a/pkg/libcontainer/security/restrict/restrict.go +++ b/pkg/libcontainer/security/restrict/restrict.go @@ -9,43 +9,67 @@ import ( "github.com/dotcloud/docker/pkg/system" ) -const flags = syscall.MS_BIND | 
syscall.MS_REC | syscall.MS_RDONLY - -var restrictions = map[string]string{ - // dirs - "/proc/sys": "", - "/proc/irq": "", - "/proc/acpi": "", - - // files - "/proc/sysrq-trigger": "/dev/null", - "/proc/kcore": "/dev/null", +// "restrictions" are container paths (files, directories, whatever) that have to be masked. +// maskPath is a "safe" path to be mounted over maskedPath. It can take two special values: +// - if it is "", then nothing is mounted; +// - if it is "EMPTY", then an empty directory is mounted instead. +// If remountRO is true then the maskedPath is remounted read-only (regardless of whether a maskPath was used). +type restriction struct { + maskedPath string + maskPath string + remountRO bool } -// Restrict locks down access to many areas of proc -// by using the asumption that the user does not have mount caps to -// revert the changes made here -func Restrict(rootfs, empty string) error { - for dest, source := range restrictions { - dest = filepath.Join(rootfs, dest) +var restrictions = []restriction{ + {"/proc", "", true}, + {"/sys", "", true}, + {"/proc/kcore", "/dev/null", false}, +} - // we don't have a "/dev/null" for dirs so have the requester pass a dir - // for us to bind mount - switch source { - case "": - source = empty - default: - source = filepath.Join(rootfs, source) - } - if err := system.Mount(source, dest, "bind", flags, ""); err != nil { - if os.IsNotExist(err) { - continue +// This has to be called while the container still has CAP_SYS_ADMIN (to be able to perform mounts). +// However, afterwards, CAP_SYS_ADMIN should be dropped (otherwise the user will be able to revert those changes). +// "empty" should be the path to an empty directory. +func Restrict(rootfs, empty string) error { + for _, restriction := range restrictions { + dest := filepath.Join(rootfs, restriction.maskedPath) + if restriction.maskPath != "" { + var source string + if restriction.maskPath == "EMPTY" { + source = empty + } else { + source = filepath.Join(rootfs, restriction.maskPath) + } + if err := system.Mount(source, dest, "", syscall.MS_BIND, ""); err != nil { + return fmt.Errorf("unable to bind-mount %s over %s: %s", source, dest, err) } - return fmt.Errorf("unable to mount %s over %s %s", source, dest, err) } - if err := system.Mount("", dest, "bind", flags|syscall.MS_REMOUNT, ""); err != nil { - return fmt.Errorf("unable to mount %s over %s %s", source, dest, err) + if restriction.remountRO { + if err := system.Mount("", dest, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil { + return fmt.Errorf("unable to remount %s readonly: %s", dest, err) + } } } + + // This weird trick will allow us to mount /proc read-only, while being able to use AppArmor. + // This is because apparently, loading an AppArmor profile requires write access to /proc/1/attr. + // So we do another mount of procfs, ensure it's write-able, and bind-mount a subset of it. 
+ tmpProcPath := filepath.Join(rootfs, ".proc") + if err := os.Mkdir(tmpProcPath, 0700); err != nil { + return fmt.Errorf("unable to create temporary proc mountpoint %s: %s", tmpProcPath, err) + } + if err := system.Mount("proc", tmpProcPath, "proc", 0, ""); err != nil { + return fmt.Errorf("unable to mount proc on temporary proc mountpoint: %s", err) + } + if err := system.Mount("proc", tmpProcPath, "", syscall.MS_REMOUNT, ""); err != nil { + return fmt.Errorf("unable to remount proc read-write: %s", err) + } + rwAttrPath := filepath.Join(rootfs, ".proc", "1", "attr") + roAttrPath := filepath.Join(rootfs, "proc", "1", "attr") + if err := system.Mount(rwAttrPath, roAttrPath, "", syscall.MS_BIND, ""); err != nil { + return fmt.Errorf("unable to bind-mount %s on %s: %s", rwAttrPath, roAttrPath, err) + } + if err := system.Unmount(tmpProcPath, 0); err != nil { + return fmt.Errorf("unable to unmount temporary proc filesystem: %s", err) + } return nil } From 83982e8b1d0cd825e1762b5540db8ae77c34f065 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 30 Apr 2014 19:09:25 -0700 Subject: [PATCH 142/219] Update to enable cross compile Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/init.go | 1 - pkg/libcontainer/security/restrict/restrict.go | 2 ++ pkg/libcontainer/security/restrict/unsupported.go | 9 +++++++++ 3 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 pkg/libcontainer/security/restrict/unsupported.go diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index bafb877cd9..90b97a9f99 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -84,7 +84,6 @@ func Init(container *libcontainer.Container, uncleanRootfs, consolePath string, if err := label.SetProcessLabel(container.Context["process_label"]); err != nil { return fmt.Errorf("set process label %s", err) } - if err := FinalizeNamespace(container); err != nil { return fmt.Errorf("finalize namespace %s", err) } diff --git a/pkg/libcontainer/security/restrict/restrict.go b/pkg/libcontainer/security/restrict/restrict.go index 8c08ea1806..a9bdc4bacb 100644 --- a/pkg/libcontainer/security/restrict/restrict.go +++ b/pkg/libcontainer/security/restrict/restrict.go @@ -1,3 +1,5 @@ +// +build linux + package restrict import ( diff --git a/pkg/libcontainer/security/restrict/unsupported.go b/pkg/libcontainer/security/restrict/unsupported.go new file mode 100644 index 0000000000..6898baab3d --- /dev/null +++ b/pkg/libcontainer/security/restrict/unsupported.go @@ -0,0 +1,9 @@ +// +build !linux + +package restrict + +import "fmt" + +func Restrict(rootfs, empty string) error { + return fmt.Errorf("not supported") +} From f5139233b930e436707a65cc032aa2952edd6e4a Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 1 May 2014 10:08:18 -0700 Subject: [PATCH 143/219] Update restrictions for better handling of mounts This also cleans up some of the left over restriction paths code from before. 
Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/execdriver/lxc/driver.go | 60 +++++++---------- daemon/execdriver/lxc/lxc_template.go | 12 +--- daemon/execdriver/native/create.go | 4 +- daemon/execdriver/native/driver.go | 7 -- pkg/libcontainer/mount/init.go | 7 +- pkg/libcontainer/nsinit/init.go | 4 +- .../security/restrict/restrict.go | 65 ++++++------------- .../security/restrict/unsupported.go | 2 +- 8 files changed, 54 insertions(+), 107 deletions(-) diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index 3fe44202ac..92a79ff5a5 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -2,12 +2,6 @@ package lxc import ( "fmt" - "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/pkg/cgroups" - "github.com/dotcloud/docker/pkg/label" - "github.com/dotcloud/docker/pkg/libcontainer/security/restrict" - "github.com/dotcloud/docker/pkg/system" - "github.com/dotcloud/docker/utils" "io/ioutil" "log" "os" @@ -18,6 +12,13 @@ import ( "strings" "syscall" "time" + + "github.com/dotcloud/docker/daemon/execdriver" + "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/label" + "github.com/dotcloud/docker/pkg/libcontainer/security/restrict" + "github.com/dotcloud/docker/pkg/system" + "github.com/dotcloud/docker/utils" ) const DriverName = "lxc" @@ -27,31 +28,26 @@ func init() { if err := setupEnv(args); err != nil { return err } - if err := setupHostname(args); err != nil { return err } - if err := setupNetworking(args); err != nil { return err } - - if err := restrict.Restrict("/", "/empty"); err != nil { - return err + if !args.Privileged { + if err := restrict.Restrict(); err != nil { + return err + } } - if err := setupCapabilities(args); err != nil { return err } - if err := setupWorkingDirectory(args); err != nil { return err } - if err := system.CloseFdsFrom(3); err != nil { return err } - if err := changeUser(args); err != nil { return err } @@ -69,10 +65,9 @@ func init() { } type driver struct { - root string // root path for the driver to use - apparmor bool - sharedRoot bool - restrictionPath string + root string // root path for the driver to use + apparmor bool + sharedRoot bool } func NewDriver(root string, apparmor bool) (*driver, error) { @@ -80,15 +75,10 @@ func NewDriver(root string, apparmor bool) (*driver, error) { if err := linkLxcStart(root); err != nil { return nil, err } - restrictionPath := filepath.Join(root, "empty") - if err := os.MkdirAll(restrictionPath, 0700); err != nil { - return nil, err - } return &driver{ - apparmor: apparmor, - root: root, - sharedRoot: rootIsShared(), - restrictionPath: restrictionPath, + apparmor: apparmor, + root: root, + sharedRoot: rootIsShared(), }, nil } @@ -419,16 +409,14 @@ func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) { if err := LxcTemplateCompiled.Execute(fo, struct { *execdriver.Command - AppArmor bool - ProcessLabel string - MountLabel string - RestrictionSource string + AppArmor bool + ProcessLabel string + MountLabel string }{ - Command: c, - AppArmor: d.apparmor, - ProcessLabel: process, - MountLabel: mount, - RestrictionSource: d.restrictionPath, + Command: c, + AppArmor: d.apparmor, + ProcessLabel: process, + MountLabel: mount, }); err != nil { return "", err } diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go index 03d32e72b5..19fa43c4c2 100644 --- a/daemon/execdriver/lxc/lxc_template.go +++ 
b/daemon/execdriver/lxc/lxc_template.go @@ -1,10 +1,11 @@ package lxc import ( - "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/pkg/label" "strings" "text/template" + + "github.com/dotcloud/docker/daemon/execdriver" + "github.com/dotcloud/docker/pkg/label" ) const LxcTemplate = ` @@ -110,13 +111,6 @@ lxc.aa_profile = unconfined {{else}} # Let AppArmor normal confinement take place (i.e., not unconfined) {{end}} -{{else}} -# Restrict access to some stuff in /proc. Note that /proc is already mounted -# read-only, so we don't need to bother about things that are just dangerous -# to write to (like sysrq-trigger). Also, recent kernels won't let a container -# peek into /proc/kcore, but let's cater for people who might run Docker on -# older kernels. Just in case. -lxc.mount.entry = {{escapeFstabSpaces $ROOTFS}}/dev/null {{escapeFstabSpaces $ROOTFS}}/proc/kcore none bind,ro 0 0 {{end}} # limits diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index 6f663f916e..5562d08986 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -24,7 +24,7 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container container.Cgroups.Name = c.ID // check to see if we are running in ramdisk to disable pivot root container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" - container.Context["restriction_path"] = d.restrictionPath + container.Context["restrictions"] = "true" if err := d.createNetwork(container, c); err != nil { return nil, err @@ -84,7 +84,7 @@ func (d *driver) setPrivileged(container *libcontainer.Container) error { } container.Cgroups.DeviceAccess = true - delete(container.Context, "restriction_path") + delete(container.Context, "restrictions") if apparmor.IsEnabled() { container.Context["apparmor_profile"] = "unconfined" diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go index a397387f11..e674d57333 100644 --- a/daemon/execdriver/native/driver.go +++ b/daemon/execdriver/native/driver.go @@ -57,7 +57,6 @@ type driver struct { root string initPath string activeContainers map[string]*exec.Cmd - restrictionPath string } func NewDriver(root, initPath string) (*driver, error) { @@ -68,14 +67,8 @@ func NewDriver(root, initPath string) (*driver, error) { if err := apparmor.InstallDefaultProfile(filepath.Join(root, "../..", BackupApparmorProfilePath)); err != nil { return nil, err } - restrictionPath := filepath.Join(root, "empty") - if err := os.MkdirAll(restrictionPath, 0700); err != nil { - return nil, err - } - return &driver{ root: root, - restrictionPath: restrictionPath, initPath: initPath, activeContainers: make(map[string]*exec.Cmd), }, nil diff --git a/pkg/libcontainer/mount/init.go b/pkg/libcontainer/mount/init.go index cc3ce2158e..6a54f2444e 100644 --- a/pkg/libcontainer/mount/init.go +++ b/pkg/libcontainer/mount/init.go @@ -123,15 +123,12 @@ func newSystemMounts(rootfs, mountLabel string, mounts libcontainer.Mounts) []mo systemMounts := []mount{ {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, {source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags}, + {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)}, + {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: 
label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, } if len(mounts.OfType("devtmpfs")) == 1 { systemMounts = append(systemMounts, mount{source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)}) } - systemMounts = append(systemMounts, - mount{source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)}, - mount{source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, - ) - return systemMounts } diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 90b97a9f99..755847948e 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -72,8 +72,8 @@ func Init(container *libcontainer.Container, uncleanRootfs, consolePath string, runtime.LockOSThread() - if restrictionPath := container.Context["restriction_path"]; restrictionPath != "" { - if err := restrict.Restrict("/", restrictionPath); err != nil { + if container.Context["restrictions"] != "" { + if err := restrict.Restrict(); err != nil { return err } } diff --git a/pkg/libcontainer/security/restrict/restrict.go b/pkg/libcontainer/security/restrict/restrict.go index a9bdc4bacb..2b7cea5a48 100644 --- a/pkg/libcontainer/security/restrict/restrict.go +++ b/pkg/libcontainer/security/restrict/restrict.go @@ -11,67 +11,42 @@ import ( "github.com/dotcloud/docker/pkg/system" ) -// "restrictions" are container paths (files, directories, whatever) that have to be masked. -// maskPath is a "safe" path to be mounted over maskedPath. It can take two special values: -// - if it is "", then nothing is mounted; -// - if it is "EMPTY", then an empty directory is mounted instead. -// If remountRO is true then the maskedPath is remounted read-only (regardless of whether a maskPath was used). -type restriction struct { - maskedPath string - maskPath string - remountRO bool -} - -var restrictions = []restriction{ - {"/proc", "", true}, - {"/sys", "", true}, - {"/proc/kcore", "/dev/null", false}, -} - // This has to be called while the container still has CAP_SYS_ADMIN (to be able to perform mounts). // However, afterwards, CAP_SYS_ADMIN should be dropped (otherwise the user will be able to revert those changes). -// "empty" should be the path to an empty directory. 
-func Restrict(rootfs, empty string) error { - for _, restriction := range restrictions { - dest := filepath.Join(rootfs, restriction.maskedPath) - if restriction.maskPath != "" { - var source string - if restriction.maskPath == "EMPTY" { - source = empty - } else { - source = filepath.Join(rootfs, restriction.maskPath) - } - if err := system.Mount(source, dest, "", syscall.MS_BIND, ""); err != nil { - return fmt.Errorf("unable to bind-mount %s over %s: %s", source, dest, err) - } - } - if restriction.remountRO { - if err := system.Mount("", dest, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil { - return fmt.Errorf("unable to remount %s readonly: %s", dest, err) - } +func Restrict() error { + // remount proc and sys as readonly + for _, dest := range []string{"proc", "sys"} { + if err := system.Mount("", dest, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil { + return fmt.Errorf("unable to remount %s readonly: %s", dest, err) } } + if err := system.Mount("/proc/kcore", "/dev/null", "", syscall.MS_BIND, ""); err != nil { + return fmt.Errorf("unable to bind-mount /dev/null over /proc/kcore") + } + // This weird trick will allow us to mount /proc read-only, while being able to use AppArmor. // This is because apparently, loading an AppArmor profile requires write access to /proc/1/attr. // So we do another mount of procfs, ensure it's write-able, and bind-mount a subset of it. - tmpProcPath := filepath.Join(rootfs, ".proc") - if err := os.Mkdir(tmpProcPath, 0700); err != nil { - return fmt.Errorf("unable to create temporary proc mountpoint %s: %s", tmpProcPath, err) + var ( + rwAttrPath = filepath.Join(".proc", "1", "attr") + roAttrPath = filepath.Join("proc", "1", "attr") + ) + + if err := os.Mkdir(".proc", 0700); err != nil { + return fmt.Errorf("unable to create temporary proc mountpoint .proc: %s", err) } - if err := system.Mount("proc", tmpProcPath, "proc", 0, ""); err != nil { + if err := system.Mount("proc", ".proc", "proc", 0, ""); err != nil { return fmt.Errorf("unable to mount proc on temporary proc mountpoint: %s", err) } - if err := system.Mount("proc", tmpProcPath, "", syscall.MS_REMOUNT, ""); err != nil { + if err := system.Mount("proc", ".proc", "", syscall.MS_REMOUNT, ""); err != nil { return fmt.Errorf("unable to remount proc read-write: %s", err) } - rwAttrPath := filepath.Join(rootfs, ".proc", "1", "attr") - roAttrPath := filepath.Join(rootfs, "proc", "1", "attr") if err := system.Mount(rwAttrPath, roAttrPath, "", syscall.MS_BIND, ""); err != nil { return fmt.Errorf("unable to bind-mount %s on %s: %s", rwAttrPath, roAttrPath, err) } - if err := system.Unmount(tmpProcPath, 0); err != nil { + if err := system.Unmount(".proc", 0); err != nil { return fmt.Errorf("unable to unmount temporary proc filesystem: %s", err) } - return nil + return os.RemoveAll(".proc") } diff --git a/pkg/libcontainer/security/restrict/unsupported.go b/pkg/libcontainer/security/restrict/unsupported.go index 6898baab3d..464e8d498d 100644 --- a/pkg/libcontainer/security/restrict/unsupported.go +++ b/pkg/libcontainer/security/restrict/unsupported.go @@ -4,6 +4,6 @@ package restrict import "fmt" -func Restrict(rootfs, empty string) error { +func Restrict() error { return fmt.Errorf("not supported") } From 3f74bdd93f08b3001f11a137210ee67a6d23c084 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 1 May 2014 11:11:29 -0700 Subject: [PATCH 144/219] Mount attr and task as rw for selinux support Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- 
pkg/libcontainer/security/restrict/restrict.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/pkg/libcontainer/security/restrict/restrict.go b/pkg/libcontainer/security/restrict/restrict.go index 2b7cea5a48..74de70aa6a 100644 --- a/pkg/libcontainer/security/restrict/restrict.go +++ b/pkg/libcontainer/security/restrict/restrict.go @@ -28,11 +28,6 @@ func Restrict() error { // This weird trick will allow us to mount /proc read-only, while being able to use AppArmor. // This is because apparently, loading an AppArmor profile requires write access to /proc/1/attr. // So we do another mount of procfs, ensure it's write-able, and bind-mount a subset of it. - var ( - rwAttrPath = filepath.Join(".proc", "1", "attr") - roAttrPath = filepath.Join("proc", "1", "attr") - ) - if err := os.Mkdir(".proc", 0700); err != nil { return fmt.Errorf("unable to create temporary proc mountpoint .proc: %s", err) } @@ -42,8 +37,10 @@ func Restrict() error { if err := system.Mount("proc", ".proc", "", syscall.MS_REMOUNT, ""); err != nil { return fmt.Errorf("unable to remount proc read-write: %s", err) } - if err := system.Mount(rwAttrPath, roAttrPath, "", syscall.MS_BIND, ""); err != nil { - return fmt.Errorf("unable to bind-mount %s on %s: %s", rwAttrPath, roAttrPath, err) + for _, path := range []string{"attr", "task"} { + if err := system.Mount(filepath.Join(".proc", "1", path), filepath.Join("proc", "1", path), "", syscall.MS_BIND, ""); err != nil { + return fmt.Errorf("unable to bind-mount %s: %s", path, err) + } } if err := system.Unmount(".proc", 0); err != nil { return fmt.Errorf("unable to unmount temporary proc filesystem: %s", err) From 24e0df8136c238cb3e231b939a82058950e6eb02 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 1 May 2014 13:55:23 -0700 Subject: [PATCH 145/219] Fix /proc/kcore mount of /dev/null Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/security/restrict/restrict.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/libcontainer/security/restrict/restrict.go b/pkg/libcontainer/security/restrict/restrict.go index 74de70aa6a..411bc06807 100644 --- a/pkg/libcontainer/security/restrict/restrict.go +++ b/pkg/libcontainer/security/restrict/restrict.go @@ -20,8 +20,7 @@ func Restrict() error { return fmt.Errorf("unable to remount %s readonly: %s", dest, err) } } - - if err := system.Mount("/proc/kcore", "/dev/null", "", syscall.MS_BIND, ""); err != nil { + if err := system.Mount("/dev/null", "/proc/kcore", "", syscall.MS_BIND, ""); err != nil { return fmt.Errorf("unable to bind-mount /dev/null over /proc/kcore") } From 71e3757174c3c1617d636ddd7462c39617ba5a77 Mon Sep 17 00:00:00 2001 From: Victor Marmol Date: Thu, 1 May 2014 15:51:38 -0700 Subject: [PATCH 146/219] Adding Rohit Jnagal and Victor Marmol to pkg/libcontainer maintainers. Docker-DCO-1.1-Signed-off-by: Victor Marmol (github: vmarmol) --- pkg/libcontainer/MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/libcontainer/MAINTAINERS b/pkg/libcontainer/MAINTAINERS index 1cb551364d..41f04602ee 100644 --- a/pkg/libcontainer/MAINTAINERS +++ b/pkg/libcontainer/MAINTAINERS @@ -1,2 +1,4 @@ Michael Crosby (@crosbymichael) Guillaume J. Charmes (@creack) +Rohit Jnagal (@rjnagal) +Victor Marmol (@vmarmol) From de49e7c0a640aada97ace458a4e5d63f5f52d4eb Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 1 May 2014 14:39:43 +1000 Subject: [PATCH 147/219] Bring back archived remote API versions - git mv archived/* . 
- put the links back into the summary document - reduce the header depth by 1 so the TOC lists each API version - update the mkdocs.yaml to render the archived API docs, but not add them to the menu/nav Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/mkdocs.yml | 14 ++- .../reference/api/docker_remote_api.md | 103 ++++++++++-------- .../{archive => }/docker_remote_api_v1.0.md | 0 .../{archive => }/docker_remote_api_v1.1.md | 0 .../{archive => }/docker_remote_api_v1.2.md | 0 .../{archive => }/docker_remote_api_v1.3.md | 0 .../{archive => }/docker_remote_api_v1.4.md | 0 .../{archive => }/docker_remote_api_v1.5.md | 0 .../{archive => }/docker_remote_api_v1.6.md | 0 .../{archive => }/docker_remote_api_v1.7.md | 0 .../{archive => }/docker_remote_api_v1.8.md | 0 11 files changed, 72 insertions(+), 45 deletions(-) rename docs/sources/reference/api/{archive => }/docker_remote_api_v1.0.md (100%) rename docs/sources/reference/api/{archive => }/docker_remote_api_v1.1.md (100%) rename docs/sources/reference/api/{archive => }/docker_remote_api_v1.2.md (100%) rename docs/sources/reference/api/{archive => }/docker_remote_api_v1.3.md (100%) rename docs/sources/reference/api/{archive => }/docker_remote_api_v1.4.md (100%) rename docs/sources/reference/api/{archive => }/docker_remote_api_v1.5.md (100%) rename docs/sources/reference/api/{archive => }/docker_remote_api_v1.6.md (100%) rename docs/sources/reference/api/{archive => }/docker_remote_api_v1.7.md (100%) rename docs/sources/reference/api/{archive => }/docker_remote_api_v1.8.md (100%) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 29b926816c..705ff0a549 100755 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -103,12 +103,24 @@ pages: - ['reference/api/registry_api.md', 'Reference', 'Docker Registry API'] - ['reference/api/registry_index_spec.md', 'Reference', 'Registry & Index Spec'] - ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API'] +- ['reference/api/docker_remote_api_v1.11.md', 'Reference', 'Docker Remote API v1.10'] - ['reference/api/docker_remote_api_v1.10.md', 'Reference', 'Docker Remote API v1.10'] -- ['reference/api/docker_remote_api_v1.9.md', 'Reference', 'Docker Remote API v1.9'] - ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries'] - ['reference/api/docker_io_oauth_api.md', 'Reference', 'Docker IO OAuth API'] - ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker IO Accounts API'] +#archived API references +- ['reference/api/docker_remote_api_v1.9.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.8.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.7.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.6.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.5.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.4.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.3.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.2.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.1.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**'] + # Contribute: - ['contributing/index.md', '**HIDDEN**'] - ['contributing/contributing.md', 'Contribute', 'Contributing'] diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index d6c25c75f2..8a490b52ee 100644 --- a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -4,8 +4,6 @@ page_keywords: API, Docker, rcli, 
REST, documentation # Docker Remote API -## 1. Brief introduction - - The Remote API is replacing rcli - By default the Docker daemon listens on unix:///var/run/docker.sock and the client must have root access to interact with the daemon @@ -21,9 +19,8 @@ page_keywords: API, Docker, rcli, REST, documentation `{'username': string, 'password': string, 'email': string, 'serveraddress' : string}` -## 2. Versions -The current version of the API is 1.11 +The current version of the API is v1.11 Calling /images//insert is the same as calling /v1.11/images//insert @@ -31,13 +28,13 @@ Calling /images//insert is the same as calling You can still call an old version of the api using /v1.11/images//insert -### v1.11 +## v1.11 -#### Full Documentation +### Full Documentation -[*Docker Remote API v1.11*](../docker_remote_api_v1.11/) +[*Docker Remote API v1.11*](/reference/api/docker_remote_api_v1.11/) -#### What's new +### What's new `GET /events` @@ -49,13 +46,13 @@ after timestamp. This url is prefered method for getting container logs now. -### v1.10 +## v1.10 -#### Full Documentation +### Full Documentation -[*Docker Remote API v1.10*](../docker_remote_api_v1.10/) +[*Docker Remote API v1.10*](/reference/api/docker_remote_api_v1.10/) -#### What's new +### What's new `DELETE /images/(name)` @@ -72,13 +69,13 @@ You can now use the force parameter to force delete of an You can now use the force paramter to force delete a container, even if it is currently running -### v1.9 +## v1.9 -#### Full Documentation +### Full Documentation -[*Docker Remote API v1.9*](../docker_remote_api_v1.9/) +[*Docker Remote API v1.9*](/reference/api/docker_remote_api_v1.9/) -#### What's new +### What's new `POST /build` @@ -88,11 +85,13 @@ uses to resolve the proper registry auth credentials for pulling the base image. Clients which previously implemented the version accepting an AuthConfig object must be updated. -### v1.8 +## v1.8 -#### Full Documentation +### Full Documentation -#### What's new +[*Docker Remote API v1.8*](/reference/api/docker_remote_api_v1.8/) + +### What's new `POST /build` @@ -118,11 +117,13 @@ progressDetail object was added in the JSON. It's now possible to get the current value and the total of the progress without having to parse the string. -### v1.7 +## v1.7 -#### Full Documentation +### Full Documentation -#### What's new +[*Docker Remote API v1.7*](/reference/api/docker_remote_api_v1.7/) + +### What's new `GET /images/json` @@ -215,11 +216,13 @@ This URI no longer exists. The `images --viz` output is now generated in the client, using the `/images/json` data. -### v1.6 +## v1.6 -#### Full Documentation +### Full Documentation -#### What's new +[*Docker Remote API v1.6*](/reference/api/docker_remote_api_v1.6/) + +### What's new `POST /containers/(id)/attach` @@ -227,15 +230,17 @@ output is now generated in the client, using the You can now split stderr from stdout. This is done by prefixing a header to each transmition. See [`POST /containers/(id)/attach`]( -../docker_remote_api_v1.9/#post--containers-(id)-attach "POST /containers/(id)/attach"). +/reference/api/docker_remote_api_v1.9/#post--containers-(id)-attach "POST /containers/(id)/attach"). The WebSocket attach is unchanged. Note that attach calls on the previous API version didn't change. Stdout and stderr are merged. 
-### v1.5 +## v1.5 -#### Full Documentation +### Full Documentation -#### What's new +[*Docker Remote API v1.5*](/reference/api/docker_remote_api_v1.5/) + +### What's new `POST /images/create` @@ -256,11 +261,13 @@ The format of the Ports entry has been changed to a list of dicts each containing PublicPort, PrivatePort and Type describing a port mapping. -### v1.4 +## v1.4 -#### Full Documentation +### Full Documentation -#### What's new +[*Docker Remote API v1.4*](/reference/api/docker_remote_api_v1.4/) + +### What's new `POST /images/create` @@ -278,14 +285,16 @@ You can now use ps args with docker top, like docker top **New!** Image's name added in the events -### v1.3 +## v1.3 docker v0.5.0 [51f6c4a](https://github.com/dotcloud/docker/commit/51f6c4a7372450d164c61e0054daf0223ddbd909) -#### Full Documentation +### Full Documentation -#### What's new +[*Docker Remote API v1.3*](/reference/api/docker_remote_api_v1.3/) + +### What's new `GET /containers/(id)/top` @@ -316,14 +325,16 @@ Start containers (/containers//start): - You can now pass host-specific configuration (e.g. bind mounts) in the POST body for start calls -### v1.2 +## v1.2 docker v0.4.2 [2e7649b](https://github.com/dotcloud/docker/commit/2e7649beda7c820793bd46766cbc2cfeace7b168) -#### Full Documentation +### Full Documentation -#### What's new +[*Docker Remote API v1.2*](/reference/api/docker_remote_api_v1.2/) + +### What's new The auth configuration is now handled by the client. @@ -346,14 +357,16 @@ Only checks the configuration but doesn't store it on the server Now returns a JSON structure with the list of images deleted/untagged. -### v1.1 +## v1.1 docker v0.4.0 [a8ae398](https://github.com/dotcloud/docker/commit/a8ae398bf52e97148ee7bd0d5868de2e15bd297f) -#### Full Documentation +### Full Documentation -#### What's new +[*Docker Remote API v1.1*](/reference/api/docker_remote_api_v1.1/) + +### What's new `POST /images/create` @@ -371,13 +384,15 @@ Uses json stream instead of HTML hijack, it looks like this: {"error":"Invalid..."} ... 
-### v1.0 +## v1.0 docker v0.3.4 [8d73740](https://github.com/dotcloud/docker/commit/8d73740343778651c09160cde9661f5f387b36f4) -#### Full Documentation +### Full Documentation -#### What's new +[*Docker Remote API v1.0*](/reference/api/docker_remote_api_v1.0/) + +### What's new Initial version diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.0.md b/docs/sources/reference/api/docker_remote_api_v1.0.md similarity index 100% rename from docs/sources/reference/api/archive/docker_remote_api_v1.0.md rename to docs/sources/reference/api/docker_remote_api_v1.0.md diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.1.md b/docs/sources/reference/api/docker_remote_api_v1.1.md similarity index 100% rename from docs/sources/reference/api/archive/docker_remote_api_v1.1.md rename to docs/sources/reference/api/docker_remote_api_v1.1.md diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.2.md b/docs/sources/reference/api/docker_remote_api_v1.2.md similarity index 100% rename from docs/sources/reference/api/archive/docker_remote_api_v1.2.md rename to docs/sources/reference/api/docker_remote_api_v1.2.md diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.3.md b/docs/sources/reference/api/docker_remote_api_v1.3.md similarity index 100% rename from docs/sources/reference/api/archive/docker_remote_api_v1.3.md rename to docs/sources/reference/api/docker_remote_api_v1.3.md diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.4.md b/docs/sources/reference/api/docker_remote_api_v1.4.md similarity index 100% rename from docs/sources/reference/api/archive/docker_remote_api_v1.4.md rename to docs/sources/reference/api/docker_remote_api_v1.4.md diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.5.md b/docs/sources/reference/api/docker_remote_api_v1.5.md similarity index 100% rename from docs/sources/reference/api/archive/docker_remote_api_v1.5.md rename to docs/sources/reference/api/docker_remote_api_v1.5.md diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.6.md b/docs/sources/reference/api/docker_remote_api_v1.6.md similarity index 100% rename from docs/sources/reference/api/archive/docker_remote_api_v1.6.md rename to docs/sources/reference/api/docker_remote_api_v1.6.md diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.7.md b/docs/sources/reference/api/docker_remote_api_v1.7.md similarity index 100% rename from docs/sources/reference/api/archive/docker_remote_api_v1.7.md rename to docs/sources/reference/api/docker_remote_api_v1.7.md diff --git a/docs/sources/reference/api/archive/docker_remote_api_v1.8.md b/docs/sources/reference/api/docker_remote_api_v1.8.md similarity index 100% rename from docs/sources/reference/api/archive/docker_remote_api_v1.8.md rename to docs/sources/reference/api/docker_remote_api_v1.8.md From 5a8ffe7ef1c33996b9032fec2cf7cb2bf64793f0 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 1 May 2014 16:03:45 +1000 Subject: [PATCH 148/219] make sure the intermediate index.html files are generated consistently Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/mkdocs.yml | 28 ++++++++++----------------- docs/sources/docker-io/index.md | 15 -------------- docs/sources/reference/commandline.md | 7 ------- 3 files changed, 10 insertions(+), 40 deletions(-) delete mode 100644 docs/sources/docker-io/index.md delete mode 100644 docs/sources/reference/commandline.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 
705ff0a549..dd6b987f11 100755 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -91,6 +91,7 @@ pages: # Reference - ['reference/index.md', '**HIDDEN**'] +- ['reference/commandline/index.md', '**HIDDEN**'] - ['reference/commandline/cli.md', 'Reference', 'Command line'] - ['reference/builder.md', 'Reference', 'Dockerfile'] - ['reference/run.md', 'Reference', 'Run Reference'] @@ -99,6 +100,7 @@ pages: - ['articles/security.md', 'Reference', 'Security'] - ['articles/baseimages.md', 'Reference', 'Creating a Base Image'] - ['use/networking.md', 'Reference', 'Advanced networking'] +- ['reference/api/index.md', '**HIDDEN**'] - ['reference/api/docker-io_api.md', 'Reference', 'Docker.io API'] - ['reference/api/registry_api.md', 'Reference', 'Docker Registry API'] - ['reference/api/registry_index_spec.md', 'Reference', 'Registry & Index Spec'] @@ -109,24 +111,6 @@ pages: - ['reference/api/docker_io_oauth_api.md', 'Reference', 'Docker IO OAuth API'] - ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker IO Accounts API'] -#archived API references -- ['reference/api/docker_remote_api_v1.9.md', '**HIDDEN**'] -- ['reference/api/docker_remote_api_v1.8.md', '**HIDDEN**'] -- ['reference/api/docker_remote_api_v1.7.md', '**HIDDEN**'] -- ['reference/api/docker_remote_api_v1.6.md', '**HIDDEN**'] -- ['reference/api/docker_remote_api_v1.5.md', '**HIDDEN**'] -- ['reference/api/docker_remote_api_v1.4.md', '**HIDDEN**'] -- ['reference/api/docker_remote_api_v1.3.md', '**HIDDEN**'] -- ['reference/api/docker_remote_api_v1.2.md', '**HIDDEN**'] -- ['reference/api/docker_remote_api_v1.1.md', '**HIDDEN**'] -- ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**'] - -# Contribute: -- ['contributing/index.md', '**HIDDEN**'] -- ['contributing/contributing.md', 'Contribute', 'Contributing'] -- ['contributing/devenvironment.md', 'Contribute', 'Development environment'] -# - ['about/license.md', 'About', 'License'] - - ['jsearch.md', '**HIDDEN**'] # - ['static_files/README.md', 'static_files', 'README'] @@ -138,3 +122,11 @@ pages: - ['terms/repository.md', '**HIDDEN**'] - ['terms/filesystem.md', '**HIDDEN**'] - ['terms/image.md', '**HIDDEN**'] + +# TODO: our theme adds a dropdown even for sections that have no subsections. 
+ #- ['faq.md', 'FAQ'] + +# Contribute: +- ['contributing/index.md', '**HIDDEN**'] +- ['contributing/contributing.md', 'Contribute', 'Contributing'] +- ['contributing/devenvironment.md', 'Contribute', 'Development environment'] diff --git a/docs/sources/docker-io/index.md b/docs/sources/docker-io/index.md deleted file mode 100644 index 747b4ee491..0000000000 --- a/docs/sources/docker-io/index.md +++ /dev/null @@ -1,15 +0,0 @@ -title -: Documentation - -description -: -- todo: change me - -keywords -: todo, docker, documentation, basic, builder - -Use -=== - -Contents: - -{{ site_name }} diff --git a/docs/sources/reference/commandline.md b/docs/sources/reference/commandline.md deleted file mode 100644 index b15f529394..0000000000 --- a/docs/sources/reference/commandline.md +++ /dev/null @@ -1,7 +0,0 @@ - -# Commands - -## Contents: - -- [Command Line](cli/) - From 314bd02d2ccd7ab59b67d02a53669028695dd3bc Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 2 May 2014 00:25:10 +0000 Subject: [PATCH 149/219] remove when httputil.NewClientConn when not in hijack Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api/client/utils.go | 48 ++++++++++++++++----------------------------- 1 file changed, 17 insertions(+), 31 deletions(-) diff --git a/api/client/utils.go b/api/client/utils.go index 7f7498dee7..152e3540ff 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -33,6 +33,18 @@ var ( ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") ) +func (cli *DockerCli) HTTPClient() *http.Client { + tr := &http.Transport{ + Dial: func(network, addr string) (net.Conn, error) { + return net.Dial(cli.proto, cli.addr) + }, + } + if cli.proto != "unix" { + tr.TLSClientConfig = cli.tlsConfig + } + return &http.Client{Transport: tr} +} + func (cli *DockerCli) dial() (net.Conn, error) { if cli.tlsConfig != nil && cli.proto != "unix" { return tls.Dial(cli.proto, cli.addr, cli.tlsConfig) @@ -61,7 +73,7 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b re := regexp.MustCompile("/+") path = re.ReplaceAllString(path, "/") - req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params) + req, err := http.NewRequest(method, fmt.Sprintf("http://v%s%s", api.APIVERSION, path), params) if err != nil { return nil, -1, err } @@ -92,22 +104,13 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b } else if method == "POST" { req.Header.Set("Content-Type", "plain/text") } - dial, err := cli.dial() + resp, err := cli.HTTPClient().Do(req) if err != nil { if strings.Contains(err.Error(), "connection refused") { return nil, -1, ErrConnectionRefused } return nil, -1, err } - clientconn := httputil.NewClientConn(dial, nil) - resp, err := clientconn.Do(req) - if err != nil { - clientconn.Close() - if strings.Contains(err.Error(), "connection refused") { - return nil, -1, ErrConnectionRefused - } - return nil, -1, err - } if resp.StatusCode < 200 || resp.StatusCode >= 400 { body, err := ioutil.ReadAll(resp.Body) @@ -119,14 +122,7 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b } return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body)) } - - wrapper := utils.NewReadCloserWrapper(resp.Body, func() error { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - return clientconn.Close() - }) - return wrapper, resp.StatusCode, nil + return resp.Body, resp.StatusCode, nil } func (cli *DockerCli) 
stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error { @@ -142,7 +138,7 @@ func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in re := regexp.MustCompile("/+") path = re.ReplaceAllString(path, "/") - req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), in) + req, err := http.NewRequest(method, fmt.Sprintf("http://v%s%s", api.APIVERSION, path), in) if err != nil { return err } @@ -157,17 +153,7 @@ func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in req.Header[k] = v } } - - dial, err := cli.dial() - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") - } - return err - } - clientconn := httputil.NewClientConn(dial, nil) - resp, err := clientconn.Do(req) - defer clientconn.Close() + resp, err := cli.HTTPClient().Do(req) if err != nil { if strings.Contains(err.Error(), "connection refused") { return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") From 41db1756268376465fd92038dfba1cca7f219595 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Fri, 2 May 2014 10:46:41 +1000 Subject: [PATCH 150/219] Force the older API docs to be generated. Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/mkdocs.yml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index dd6b987f11..c16436e892 100755 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -105,8 +105,18 @@ pages: - ['reference/api/registry_api.md', 'Reference', 'Docker Registry API'] - ['reference/api/registry_index_spec.md', 'Reference', 'Registry & Index Spec'] - ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API'] -- ['reference/api/docker_remote_api_v1.11.md', 'Reference', 'Docker Remote API v1.10'] +- ['reference/api/docker_remote_api_v1.11.md', 'Reference', 'Docker Remote API v1.11'] - ['reference/api/docker_remote_api_v1.10.md', 'Reference', 'Docker Remote API v1.10'] +- ['reference/api/docker_remote_api_v1.9.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.8.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.7.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.6.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.5.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.4.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.3.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.2.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.1.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**'] - ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries'] - ['reference/api/docker_io_oauth_api.md', 'Reference', 'Docker IO OAuth API'] - ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker IO Accounts API'] From 76fa7d588adfe644824d9a00dafce2d2991a7013 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 1 May 2014 19:09:12 -0700 Subject: [PATCH 151/219] Apply apparmor before restrictions There is not need for the remount hack, we use aa_change_onexec so the apparmor profile is not applied until we exec the users app. 
Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/apparmor/apparmor.go | 2 +- pkg/apparmor/apparmor_disabled.go | 4 +-- pkg/libcontainer/console/console.go | 5 ++-- pkg/libcontainer/nsinit/init.go | 13 +++++----- .../security/restrict/restrict.go | 25 +------------------ 5 files changed, 12 insertions(+), 37 deletions(-) diff --git a/pkg/apparmor/apparmor.go b/pkg/apparmor/apparmor.go index 6fdb1f8958..704ee29ed0 100644 --- a/pkg/apparmor/apparmor.go +++ b/pkg/apparmor/apparmor.go @@ -20,7 +20,7 @@ func IsEnabled() bool { return false } -func ApplyProfile(pid int, name string) error { +func ApplyProfile(name string) error { if name == "" { return nil } diff --git a/pkg/apparmor/apparmor_disabled.go b/pkg/apparmor/apparmor_disabled.go index 77543e4a87..8d86ce9d4a 100644 --- a/pkg/apparmor/apparmor_disabled.go +++ b/pkg/apparmor/apparmor_disabled.go @@ -2,12 +2,10 @@ package apparmor -import () - func IsEnabled() bool { return false } -func ApplyProfile(pid int, name string) error { +func ApplyProfile(name string) error { return nil } diff --git a/pkg/libcontainer/console/console.go b/pkg/libcontainer/console/console.go index 05cd08a92e..5f06aea225 100644 --- a/pkg/libcontainer/console/console.go +++ b/pkg/libcontainer/console/console.go @@ -4,11 +4,12 @@ package console import ( "fmt" - "github.com/dotcloud/docker/pkg/label" - "github.com/dotcloud/docker/pkg/system" "os" "path/filepath" "syscall" + + "github.com/dotcloud/docker/pkg/label" + "github.com/dotcloud/docker/pkg/system" ) // Setup initializes the proper /dev/console inside the rootfs path diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 755847948e..22345f603f 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -72,18 +72,17 @@ func Init(container *libcontainer.Container, uncleanRootfs, consolePath string, runtime.LockOSThread() + if err := apparmor.ApplyProfile(container.Context["apparmor_profile"]); err != nil { + return fmt.Errorf("set apparmor profile %s: %s", container.Context["apparmor_profile"], err) + } + if err := label.SetProcessLabel(container.Context["process_label"]); err != nil { + return fmt.Errorf("set process label %s", err) + } if container.Context["restrictions"] != "" { if err := restrict.Restrict(); err != nil { return err } } - - if err := apparmor.ApplyProfile(os.Getpid(), container.Context["apparmor_profile"]); err != nil { - return err - } - if err := label.SetProcessLabel(container.Context["process_label"]); err != nil { - return fmt.Errorf("set process label %s", err) - } if err := FinalizeNamespace(container); err != nil { return fmt.Errorf("finalize namespace %s", err) } diff --git a/pkg/libcontainer/security/restrict/restrict.go b/pkg/libcontainer/security/restrict/restrict.go index 411bc06807..cfff09f512 100644 --- a/pkg/libcontainer/security/restrict/restrict.go +++ b/pkg/libcontainer/security/restrict/restrict.go @@ -4,8 +4,6 @@ package restrict import ( "fmt" - "os" - "path/filepath" "syscall" "github.com/dotcloud/docker/pkg/system" @@ -23,26 +21,5 @@ func Restrict() error { if err := system.Mount("/dev/null", "/proc/kcore", "", syscall.MS_BIND, ""); err != nil { return fmt.Errorf("unable to bind-mount /dev/null over /proc/kcore") } - - // This weird trick will allow us to mount /proc read-only, while being able to use AppArmor. - // This is because apparently, loading an AppArmor profile requires write access to /proc/1/attr. 
- // So we do another mount of procfs, ensure it's write-able, and bind-mount a subset of it. - if err := os.Mkdir(".proc", 0700); err != nil { - return fmt.Errorf("unable to create temporary proc mountpoint .proc: %s", err) - } - if err := system.Mount("proc", ".proc", "proc", 0, ""); err != nil { - return fmt.Errorf("unable to mount proc on temporary proc mountpoint: %s", err) - } - if err := system.Mount("proc", ".proc", "", syscall.MS_REMOUNT, ""); err != nil { - return fmt.Errorf("unable to remount proc read-write: %s", err) - } - for _, path := range []string{"attr", "task"} { - if err := system.Mount(filepath.Join(".proc", "1", path), filepath.Join("proc", "1", path), "", syscall.MS_BIND, ""); err != nil { - return fmt.Errorf("unable to bind-mount %s: %s", path, err) - } - } - if err := system.Unmount(".proc", 0); err != nil { - return fmt.Errorf("unable to unmount temporary proc filesystem: %s", err) - } - return os.RemoveAll(".proc") + return nil } From 877ad96d89093af8b16112c3534f4ceceaf1b7b3 Mon Sep 17 00:00:00 2001 From: Felix Rabe Date: Fri, 2 May 2014 16:53:59 +0200 Subject: [PATCH 152/219] cli.md: Fix up Markdown formatting by adding one ` --- docs/sources/reference/commandline/cli.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index cfcab2af47..df55c4b2a2 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -244,7 +244,7 @@ See also: This example specifies that the `PATH` is `.`, and so all the files in the local directory get -tar`d and sent to the Docker daemon. The `PATH` +`tar`d and sent to the Docker daemon. The `PATH` specifies where to find the files for the "context" of the build on the Docker daemon. 
Remember that the daemon could be running on a remote machine and that no parsing of the Dockerfile From 8c9192cd76ad46bda3d0ec5ba7eb4a30669afb40 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 2 May 2014 00:40:13 +0000 Subject: [PATCH 153/219] move hijack to it's own file Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api/client/hijack.go | 133 ++++++++++++++++++++++++++++++++++++++++++ api/client/utils.go | 134 +------------------------------------------ 2 files changed, 134 insertions(+), 133 deletions(-) create mode 100644 api/client/hijack.go diff --git a/api/client/hijack.go b/api/client/hijack.go new file mode 100644 index 0000000000..0a9d5d8ef2 --- /dev/null +++ b/api/client/hijack.go @@ -0,0 +1,133 @@ +package client + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "os" + "runtime" + "strings" + + "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/dockerversion" + "github.com/dotcloud/docker/pkg/term" + "github.com/dotcloud/docker/utils" +) + +func (cli *DockerCli) dial() (net.Conn, error) { + if cli.tlsConfig != nil && cli.proto != "unix" { + return tls.Dial(cli.proto, cli.addr, cli.tlsConfig) + } + return net.Dial(cli.proto, cli.addr) +} + +func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error { + defer func() { + if started != nil { + close(started) + } + }() + + req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), nil) + if err != nil { + return err + } + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) + req.Header.Set("Content-Type", "plain/text") + req.Host = cli.addr + + dial, err := cli.dial() + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") + } + return err + } + clientconn := httputil.NewClientConn(dial, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + clientconn.Do(req) + + rwc, br := clientconn.Hijack() + defer rwc.Close() + + if started != nil { + started <- rwc + } + + var receiveStdout chan error + + var oldState *term.State + + if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" { + oldState, err = term.SetRawTerminal(cli.terminalFd) + if err != nil { + return err + } + defer term.RestoreTerminal(cli.terminalFd, oldState) + } + + if stdout != nil || stderr != nil { + receiveStdout = utils.Go(func() (err error) { + defer func() { + if in != nil { + if setRawTerminal && cli.isTerminal { + term.RestoreTerminal(cli.terminalFd, oldState) + } + // For some reason this Close call blocks on darwin.. + // As the client exists right after, simply discard the close + // until we find a better solution. 
+ if runtime.GOOS != "darwin" { + in.Close() + } + } + }() + + // When TTY is ON, use regular copy + if setRawTerminal { + _, err = io.Copy(stdout, br) + } else { + _, err = utils.StdCopy(stdout, stderr, br) + } + utils.Debugf("[hijack] End of stdout") + return err + }) + } + + sendStdin := utils.Go(func() error { + if in != nil { + io.Copy(rwc, in) + utils.Debugf("[hijack] End of stdin") + } + if tcpc, ok := rwc.(*net.TCPConn); ok { + if err := tcpc.CloseWrite(); err != nil { + utils.Debugf("Couldn't send EOF: %s\n", err) + } + } else if unixc, ok := rwc.(*net.UnixConn); ok { + if err := unixc.CloseWrite(); err != nil { + utils.Debugf("Couldn't send EOF: %s\n", err) + } + } + // Discard errors due to pipe interruption + return nil + }) + + if stdout != nil || stderr != nil { + if err := <-receiveStdout; err != nil { + utils.Debugf("Error receiveStdout: %s", err) + return err + } + } + + if !cli.isTerminal { + if err := <-sendStdin; err != nil { + utils.Debugf("Error sendStdin: %s", err) + return err + } + } + return nil +} diff --git a/api/client/utils.go b/api/client/utils.go index 152e3540ff..6f574b48db 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -2,7 +2,6 @@ package client import ( "bytes" - "crypto/tls" "encoding/base64" "encoding/json" "errors" @@ -11,12 +10,9 @@ import ( "io/ioutil" "net" "net/http" - "net/http/httputil" "net/url" "os" gosignal "os/signal" - "regexp" - goruntime "runtime" "strconv" "strings" "syscall" @@ -35,23 +31,14 @@ var ( func (cli *DockerCli) HTTPClient() *http.Client { tr := &http.Transport{ + TLSClientConfig: cli.tlsConfig, Dial: func(network, addr string) (net.Conn, error) { return net.Dial(cli.proto, cli.addr) }, } - if cli.proto != "unix" { - tr.TLSClientConfig = cli.tlsConfig - } return &http.Client{Transport: tr} } -func (cli *DockerCli) dial() (net.Conn, error) { - if cli.tlsConfig != nil && cli.proto != "unix" { - return tls.Dial(cli.proto, cli.addr, cli.tlsConfig) - } - return net.Dial(cli.proto, cli.addr) -} - func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) { params := bytes.NewBuffer(nil) if data != nil { @@ -69,9 +56,6 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b } } } - // fixme: refactor client to support redirect - re := regexp.MustCompile("/+") - path = re.ReplaceAllString(path, "/") req, err := http.NewRequest(method, fmt.Sprintf("http://v%s%s", api.APIVERSION, path), params) if err != nil { @@ -134,10 +118,6 @@ func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in in = bytes.NewReader([]byte{}) } - // fixme: refactor client to support redirect - re := regexp.MustCompile("/+") - path = re.ReplaceAllString(path, "/") - req, err := http.NewRequest(method, fmt.Sprintf("http://v%s%s", api.APIVERSION, path), in) if err != nil { return err @@ -189,118 +169,6 @@ func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in return nil } -func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error { - defer func() { - if started != nil { - close(started) - } - }() - // fixme: refactor client to support redirect - re := regexp.MustCompile("/+") - path = re.ReplaceAllString(path, "/") - - req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), nil) - if err != nil { - return err - } - req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) - 
req.Header.Set("Content-Type", "plain/text") - req.Host = cli.addr - - dial, err := cli.dial() - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") - } - return err - } - clientconn := httputil.NewClientConn(dial, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - clientconn.Do(req) - - rwc, br := clientconn.Hijack() - defer rwc.Close() - - if started != nil { - started <- rwc - } - - var receiveStdout chan error - - var oldState *term.State - - if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" { - oldState, err = term.SetRawTerminal(cli.terminalFd) - if err != nil { - return err - } - defer term.RestoreTerminal(cli.terminalFd, oldState) - } - - if stdout != nil || stderr != nil { - receiveStdout = utils.Go(func() (err error) { - defer func() { - if in != nil { - if setRawTerminal && cli.isTerminal { - term.RestoreTerminal(cli.terminalFd, oldState) - } - // For some reason this Close call blocks on darwin.. - // As the client exists right after, simply discard the close - // until we find a better solution. - if goruntime.GOOS != "darwin" { - in.Close() - } - } - }() - - // When TTY is ON, use regular copy - if setRawTerminal { - _, err = io.Copy(stdout, br) - } else { - _, err = utils.StdCopy(stdout, stderr, br) - } - utils.Debugf("[hijack] End of stdout") - return err - }) - } - - sendStdin := utils.Go(func() error { - if in != nil { - io.Copy(rwc, in) - utils.Debugf("[hijack] End of stdin") - } - if tcpc, ok := rwc.(*net.TCPConn); ok { - if err := tcpc.CloseWrite(); err != nil { - utils.Debugf("Couldn't send EOF: %s\n", err) - } - } else if unixc, ok := rwc.(*net.UnixConn); ok { - if err := unixc.CloseWrite(); err != nil { - utils.Debugf("Couldn't send EOF: %s\n", err) - } - } - // Discard errors due to pipe interruption - return nil - }) - - if stdout != nil || stderr != nil { - if err := <-receiveStdout; err != nil { - utils.Debugf("Error receiveStdout: %s", err) - return err - } - } - - if !cli.isTerminal { - if err := <-sendStdin; err != nil { - utils.Debugf("Error sendStdin: %s", err) - return err - } - } - return nil - -} - func (cli *DockerCli) resizeTty(id string) { height, width := cli.getTtySize() if height == 0 && width == 0 { From 59fe77bfa638001cbe9af386f350d6e0dbb23398 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 2 May 2014 11:14:24 -0700 Subject: [PATCH 154/219] Don't restrict lxc because of apparmor We don't have the flexibility to do extra things with lxc because it is a black box and most fo the magic happens before we get a chance to interact with it in dockerinit. 
Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/execdriver/lxc/driver.go | 6 ------ pkg/libcontainer/nsinit/init.go | 2 +- pkg/libcontainer/security/restrict/restrict.go | 4 ++-- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index 92a79ff5a5..2c06211c0d 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -16,7 +16,6 @@ import ( "github.com/dotcloud/docker/daemon/execdriver" "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/label" - "github.com/dotcloud/docker/pkg/libcontainer/security/restrict" "github.com/dotcloud/docker/pkg/system" "github.com/dotcloud/docker/utils" ) @@ -34,11 +33,6 @@ func init() { if err := setupNetworking(args); err != nil { return err } - if !args.Privileged { - if err := restrict.Restrict(); err != nil { - return err - } - } if err := setupCapabilities(args); err != nil { return err } diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 22345f603f..a123757708 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -79,7 +79,7 @@ func Init(container *libcontainer.Container, uncleanRootfs, consolePath string, return fmt.Errorf("set process label %s", err) } if container.Context["restrictions"] != "" { - if err := restrict.Restrict(); err != nil { + if err := restrict.Restrict("proc", "sys"); err != nil { return err } } diff --git a/pkg/libcontainer/security/restrict/restrict.go b/pkg/libcontainer/security/restrict/restrict.go index cfff09f512..e1296b1d7f 100644 --- a/pkg/libcontainer/security/restrict/restrict.go +++ b/pkg/libcontainer/security/restrict/restrict.go @@ -11,9 +11,9 @@ import ( // This has to be called while the container still has CAP_SYS_ADMIN (to be able to perform mounts). // However, afterwards, CAP_SYS_ADMIN should be dropped (otherwise the user will be able to revert those changes). -func Restrict() error { +func Restrict(mounts ...string) error { // remount proc and sys as readonly - for _, dest := range []string{"proc", "sys"} { + for _, dest := range mounts { if err := system.Mount("", dest, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil { return fmt.Errorf("unable to remount %s readonly: %s", dest, err) } From 9f152aacf8427cbd20a70d52d633f8a6d624aff5 Mon Sep 17 00:00:00 2001 From: Gabriel Monroy Date: Fri, 2 May 2014 13:27:17 -0600 Subject: [PATCH 155/219] deregister containers before removing driver and containerGraph references This is required to address a race condition described in #5553, where a container can be partially deleted -- for example, the root filesystem but not the init filesystem -- which makes it impossible to delete the container without re-adding the missing filesystems manually. This behavior has been witnessed when rebooting boxes that are configured to remove containers on shutdown in parallel with stopping the Docker daemon. 
Docker-DCO-1.1-Signed-off-by: Gabriel Monroy (github: gabrtv) --- daemon/daemon.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index 64a53989d0..22182f389f 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -272,6 +272,10 @@ func (daemon *Daemon) Destroy(container *Container) error { return err } + // Deregister the container before removing its directory, to avoid race conditions + daemon.idIndex.Delete(container.ID) + daemon.containers.Remove(element) + if err := daemon.driver.Remove(container.ID); err != nil { return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err) } @@ -285,9 +289,6 @@ func (daemon *Daemon) Destroy(container *Container) error { utils.Debugf("Unable to remove container from link graph: %s", err) } - // Deregister the container before removing its directory, to avoid race conditions - daemon.idIndex.Delete(container.ID) - daemon.containers.Remove(element) if err := os.RemoveAll(container.root); err != nil { return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) } From 45be6f6dff1a8be328e5ade008aae8f9062f5cef Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 2 May 2014 19:49:12 +0000 Subject: [PATCH 156/219] fix https Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api/client/cli.go | 7 +++++++ api/client/utils.go | 8 +++++--- integration/https_test.go | 8 +++++--- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/api/client/cli.go b/api/client/cli.go index b58d3c3c75..49fb3c978f 100644 --- a/api/client/cli.go +++ b/api/client/cli.go @@ -65,8 +65,13 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsC var ( isTerminal = false terminalFd uintptr + scheme = "http" ) + if tlsConfig != nil { + scheme = "https" + } + if in != nil { if file, ok := in.(*os.File); ok { terminalFd = file.Fd() @@ -86,6 +91,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsC isTerminal: isTerminal, terminalFd: terminalFd, tlsConfig: tlsConfig, + scheme: scheme, } } @@ -99,4 +105,5 @@ type DockerCli struct { isTerminal bool terminalFd uintptr tlsConfig *tls.Config + scheme string } diff --git a/api/client/utils.go b/api/client/utils.go index 6f574b48db..8f303dcd98 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -57,7 +57,7 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b } } - req, err := http.NewRequest(method, fmt.Sprintf("http://v%s%s", api.APIVERSION, path), params) + req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params) if err != nil { return nil, -1, err } @@ -82,7 +82,8 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b } } req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) - req.Host = cli.addr + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme if data != nil { req.Header.Set("Content-Type", "application/json") } else if method == "POST" { @@ -123,7 +124,8 @@ func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in return err } req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) - req.Host = cli.addr + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme if method == "POST" { req.Header.Set("Content-Type", "plain/text") } diff --git a/integration/https_test.go b/integration/https_test.go index 0b4abea881..34c16cf9f9 100644 --- a/integration/https_test.go +++ 
b/integration/https_test.go @@ -3,10 +3,12 @@ package docker import ( "crypto/tls" "crypto/x509" - "github.com/dotcloud/docker/api/client" "io/ioutil" + "strings" "testing" "time" + + "github.com/dotcloud/docker/api/client" ) const ( @@ -56,7 +58,7 @@ func TestHttpsInfoRogueCert(t *testing.T) { if err == nil { t.Fatal("Expected error but got nil") } - if err.Error() != errBadCertificate { + if !strings.Contains(err.Error(), errBadCertificate) { t.Fatalf("Expected error: %s, got instead: %s", errBadCertificate, err) } }) @@ -74,7 +76,7 @@ func TestHttpsInfoRogueServerCert(t *testing.T) { t.Fatal("Expected error but got nil") } - if err.Error() != errCaUnknown { + if !strings.Contains(err.Error(), errCaUnknown) { t.Fatalf("Expected error: %s, got instead: %s", errBadCertificate, err) } From a7ccbfd5f143af8a7accc69803b1588e568328ac Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Fri, 2 May 2014 13:55:45 -0700 Subject: [PATCH 157/219] Month devpts before mounting subdirs Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- pkg/libcontainer/mount/init.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/libcontainer/mount/init.go b/pkg/libcontainer/mount/init.go index 6a54f2444e..cfe61d1532 100644 --- a/pkg/libcontainer/mount/init.go +++ b/pkg/libcontainer/mount/init.go @@ -128,7 +128,7 @@ func newSystemMounts(rootfs, mountLabel string, mounts libcontainer.Mounts) []mo } if len(mounts.OfType("devtmpfs")) == 1 { - systemMounts = append(systemMounts, mount{source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)}) + systemMounts = append([]mount{{source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: label.FormatMountLabel("mode=755", mountLabel)}}, systemMounts...) } return systemMounts } From 12a4b376fd42931d959cd925983243e94c981de4 Mon Sep 17 00:00:00 2001 From: Felix Rabe Date: Fri, 2 May 2014 22:56:35 +0200 Subject: [PATCH 158/219] cli.md: Add space --- docs/sources/reference/commandline/cli.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index cfcab2af47..c14a9041a7 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -991,7 +991,7 @@ optionally suffixed with `:ro` or `:rw` to mount the volumes in read-only or read-write mode, respectively. By default, the volumes are mounted in the same mode (read write or read only) as the reference container. -The `-a` flag tells `docker run` to bind to the container'sstdin, stdout or +The `-a` flag tells `docker run` to bind to the container's stdin, stdout or stderr. This makes it possible to manipulate the output and input as needed. $ sudo echo "test" | docker run -i -a stdin ubuntu cat - From 4706a1ad76ed9bc6c0555499d0bd8b8eea3b3604 Mon Sep 17 00:00:00 2001 From: Felix Rabe Date: Fri, 2 May 2014 23:13:28 +0200 Subject: [PATCH 159/219] cli.md: Add another sudo --- docs/sources/reference/commandline/cli.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index cfcab2af47..59c15ccd49 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -498,7 +498,7 @@ Import to docker via pipe and *stdin*. 
**Import from a local directory:** - $ sudo tar -c . | docker import - exampleimagedir + $ sudo tar -c . | sudo docker import - exampleimagedir Note the `sudo` in this example – you must preserve the ownership of the files (especially root ownership) during the From 8913ec4912e529be44b7cc2aaf465b0d9b03ffc9 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Fri, 2 May 2014 14:15:54 -0700 Subject: [PATCH 160/219] Remove unused daemon/sorter.go Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- daemon/sorter.go | 25 ------------------------- 1 file changed, 25 deletions(-) delete mode 100644 daemon/sorter.go diff --git a/daemon/sorter.go b/daemon/sorter.go deleted file mode 100644 index c1525aa350..0000000000 --- a/daemon/sorter.go +++ /dev/null @@ -1,25 +0,0 @@ -package daemon - -import "sort" - -type containerSorter struct { - containers []*Container - by func(i, j *Container) bool -} - -func (s *containerSorter) Len() int { - return len(s.containers) -} - -func (s *containerSorter) Swap(i, j int) { - s.containers[i], s.containers[j] = s.containers[j], s.containers[i] -} - -func (s *containerSorter) Less(i, j int) bool { - return s.by(s.containers[i], s.containers[j]) -} - -func sortContainers(containers []*Container, predicate func(i, j *Container) bool) { - s := &containerSorter{containers, predicate} - sort.Sort(s) -} From cf0076b92dd11b3bda9ac7982e374d4531925ff9 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 2 May 2014 21:43:51 +0000 Subject: [PATCH 161/219] add _ping endpoint Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api/server/server.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/api/server/server.go b/api/server/server.go index 5db9df1901..18c9a93d97 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -3,7 +3,6 @@ package server import ( "bufio" "bytes" - "code.google.com/p/go.net/websocket" "crypto/tls" "crypto/x509" "encoding/base64" @@ -21,6 +20,8 @@ import ( "strings" "syscall" + "code.google.com/p/go.net/websocket" + "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/listenbuffer" @@ -976,6 +977,11 @@ func writeCorsHeaders(w http.ResponseWriter, r *http.Request) { w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") } +func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.Write([]byte{'O', 'K'}) + return nil +} + func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { // log the request @@ -1044,6 +1050,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st } m := map[string]map[string]HttpApiFunc{ "GET": { + "/_ping": ping, "/events": getEvents, "/info": getInfo, "/version": getVersion, From 3c422fe5bf45391a509fd3c7f33033baefb0a234 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 2 May 2014 21:51:20 +0000 Subject: [PATCH 162/219] add doc Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- .../reference/api/docker_remote_api.md | 5 +++++ .../reference/api/docker_remote_api_v1.11.md | 20 +++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md index 8a490b52ee..47f4724b1a 100644 --- 
a/docs/sources/reference/api/docker_remote_api.md +++ b/docs/sources/reference/api/docker_remote_api.md @@ -36,6 +36,11 @@ You can still call an old version of the api using ### What's new +`GET /_ping` + +**New!** +You can now ping the server via the `_ping` endpoint. + `GET /events` **New!** diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index ebaa3e6e44..5ad174565d 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -1162,6 +1162,26 @@ Show the docker version information - **200** – no error - **500** – server error +### Ping the docker server + +`GET /_ping` + +Ping the docker server + + **Example request**: + + GET /_ping HTTP/1.1 + + **Example response**: + + HTTP/1.1 200 OK + + OK + + Status Codes: + + - **200** - no error + ### Create a new image from a container's changes `POST /commit` From e318af6fb097ce5157b6766d8dfe921403858756 Mon Sep 17 00:00:00 2001 From: Felix Rabe Date: Fri, 2 May 2014 22:59:43 +0200 Subject: [PATCH 163/219] cli.md: sudo at the right place Docker-DCO-1.1-Signed-off-by: Felix Rabe (github: felixrabe) --- docs/sources/reference/commandline/cli.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index cfcab2af47..ddbbb27f05 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -994,7 +994,7 @@ the same mode (read write or read only) as the reference container. The `-a` flag tells `docker run` to bind to the container'sstdin, stdout or stderr. This makes it possible to manipulate the output and input as needed. - $ sudo echo "test" | docker run -i -a stdin ubuntu cat - + $ echo "test" | sudo docker run -i -a stdin ubuntu cat - This pipes data into a container and prints the container's ID by attaching only to the container'sstdin. @@ -1005,7 +1005,7 @@ This isn't going to print anything unless there's an error because We've only attached to the stderr of the container. The container's logs still store what's been written to stderr and stdout. - $ sudo cat somefile | docker run -i -a stdin mybuilder dobuild + $ cat somefile | sudo docker run -i -a stdin mybuilder dobuild This is how piping a file into a container could be done for a build. 
The container's ID will be printed after the build is done and the build From c65de2c0207ac67e5023ada8709490ef4627bd01 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 2 May 2014 22:03:59 +0000 Subject: [PATCH 164/219] return write error Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api/server/server.go | 4 ++-- docs/sources/reference/api/docker_remote_api_v1.11.md | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/api/server/server.go b/api/server/server.go index 18c9a93d97..ab0f98fa47 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -978,8 +978,8 @@ func writeCorsHeaders(w http.ResponseWriter, r *http.Request) { } func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.Write([]byte{'O', 'K'}) - return nil + _, err := w.Write([]byte{'O', 'K'}) + return err } func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc { diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md index 5ad174565d..53e07b380c 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.11.md +++ b/docs/sources/reference/api/docker_remote_api_v1.11.md @@ -1181,6 +1181,7 @@ Ping the docker server Status Codes: - **200** - no error + - **500** - server error ### Create a new image from a container's changes From de75af9fe2d91df7297e498d320b496addfb52f4 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Thu, 1 May 2014 16:10:20 -0700 Subject: [PATCH 165/219] engine: catchall handler is shadowed by specific handlers This allows using `Engine.Register` and `Engine.RegisterCatchall` on the same engine without the catchall hiding all other handlers. Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- engine/engine.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/engine/engine.go b/engine/engine.go index dc1984ccb5..6f80e54b7e 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -118,13 +118,12 @@ func (eng *Engine) Job(name string, args ...string) *Job { if eng.Logging { job.Stderr.Add(utils.NopWriteCloser(eng.Stderr)) } - if eng.catchall != nil { + + // Catchall is shadowed by specific Register. + if handler, exists := eng.handlers[name]; exists { + job.handler = handler + } else if eng.catchall != nil { job.handler = eng.catchall - } else { - handler, exists := eng.handlers[name] - if exists { - job.handler = handler - } } return job } From 3b73c26194836c1e2b737146a5b0c840226c65d2 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Thu, 1 May 2014 18:39:46 -0700 Subject: [PATCH 166/219] Engine: empty job names are illegal, catchall or not Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- engine/engine.go | 3 ++- engine/engine_test.go | 16 ++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/engine/engine.go b/engine/engine.go index 6f80e54b7e..58b43eca04 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -122,7 +122,8 @@ func (eng *Engine) Job(name string, args ...string) *Job { // Catchall is shadowed by specific Register. if handler, exists := eng.handlers[name]; exists { job.handler = handler - } else if eng.catchall != nil { + } else if eng.catchall != nil && name != "" { + // empty job names are illegal, catchall or not. 
job.handler = eng.catchall } return job diff --git a/engine/engine_test.go b/engine/engine_test.go index 8023bd58f3..de7f74012e 100644 --- a/engine/engine_test.go +++ b/engine/engine_test.go @@ -133,3 +133,19 @@ func TestParseJob(t *testing.T) { t.Fatalf("Job was not called") } } + +func TestCatchallEmptyName(t *testing.T) { + eng := New() + var called bool + eng.RegisterCatchall(func(job *Job) Status { + called = true + return StatusOK + }) + err := eng.Job("").Run() + if err == nil { + t.Fatalf("Engine.Job(\"\").Run() should return an error") + } + if called { + t.Fatalf("Engine.Job(\"\").Run() should return an error") + } +} From 015a2abafa92ecc61fe5828a285a1e6dcfa07693 Mon Sep 17 00:00:00 2001 From: Felix Rabe Date: Fri, 2 May 2014 23:27:39 +0200 Subject: [PATCH 167/219] cli.md: More typos I've seen one other missing space that I addressed in another PR already. I don't know whether that is a common occurrence in the docs. About the second diff chunk, it looks like some copy-paste mistake to me. Docker-DCO-1.1-Signed-off-by: Felix Rabe (github: felixrabe) --- docs/sources/reference/commandline/cli.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index cfcab2af47..08d3c2b4cf 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -537,7 +537,7 @@ Return low-level information on a container/image By default, this will render all results in a JSON array. If a format is specified, the given template will be executed for each result. -Go's[text/template](http://golang.org/pkg/text/template/) package +Go's [text/template](http://golang.org/pkg/text/template/) package describes all the details of the format. ### Examples @@ -798,7 +798,7 @@ removed before the image is removed. $ sudo docker images REPOSITORY TAG IMAGE ID CREATED SIZE - test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) $ sudo docker rmi test Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 From f37ce76bf68d4935accd1018c904e80e42066f9f Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Thu, 1 May 2014 16:08:39 -0700 Subject: [PATCH 168/219] api/server: better error checking to avoid unnecessary panics Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- api/server/server.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/api/server/server.go b/api/server/server.go index 5db9df1901..0f887a8aea 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -1267,6 +1267,9 @@ func ListenAndServe(proto, addr string, job *engine.Job) error { // ServeApi loops through all of the protocols sent in to docker and spawns // off a go routine to setup a serving http.Server for each. 
func ServeApi(job *engine.Job) engine.Status { + if len(job.Args) == 0 { + return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name) + } var ( protoAddrs = job.Args chErrors = make(chan error, len(protoAddrs)) @@ -1279,6 +1282,9 @@ func ServeApi(job *engine.Job) engine.Status { for _, protoAddr := range protoAddrs { protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name) + } go func() { log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1]) chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job) From 205bd91fcab30292ac5f246ce9bdbb045ad1023f Mon Sep 17 00:00:00 2001 From: Felix Rabe Date: Sat, 3 May 2014 02:11:00 +0200 Subject: [PATCH 169/219] run.md: Convert some backticks to apo's --- docs/sources/reference/run.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index a8acb97071..97012873d2 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -291,7 +291,7 @@ the container you might have an HTTP service listening on port 80 (and so you 42800. To help a new client container reach the server container's internal port -operator `--expose``d by the operator or `EXPOSE``d by the developer, the +operator `--expose`'d by the operator or `EXPOSE`'d by the developer, the operator has three choices: start the server container with `-P` or `-p,` or start the client container with `--link`. From 4a3b0e8d5d2653cfecbfee370be2406265211253 Mon Sep 17 00:00:00 2001 From: Felix Rabe Date: Sat, 3 May 2014 02:20:59 +0200 Subject: [PATCH 170/219] run.md: Close braces Docker-DCO-1.1-Signed-off-by: Felix Rabe (github: felixrabe) --- docs/sources/reference/run.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index a8acb97071..e2fb060d65 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -220,7 +220,7 @@ in `docker run`. We'll go through what the developer might have set in each Dockerfile instruction and how the operator can override that setting. - [CMD (Default Command or Options)](#cmd-default-command-or-options) - - [ENTRYPOINT (Default Command to Execute at Runtime]( + - [ENTRYPOINT (Default Command to Execute at Runtime)]( #entrypoint-default-command-to-execute-at-runtime) - [EXPOSE (Incoming Ports)](#expose-incoming-ports) - [ENV (Environment Variables)](#env-environment-variables) @@ -243,7 +243,7 @@ operator (the person running a container from the image), you can override that If the image also specifies an `ENTRYPOINT` then the `CMD` or `COMMAND` get appended as arguments to the `ENTRYPOINT`. -## ENTRYPOINT (Default Command to Execute at Runtime +## ENTRYPOINT (Default Command to Execute at Runtime) --entrypoint="": Overwrite the default entrypoint set by the image From 3d605683b3d272982399635a55ee81b2a7535e81 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Sun, 27 Apr 2014 15:06:09 -0700 Subject: [PATCH 171/219] Move 'auth' to the registry subsystem This is the first step towards separating the registry subsystem from the deprecated `Server` object. * New service `github.com/dotcloud/docker/registry/Service` * The service is installed by default in `builtins` * The service only exposes `auth` for now... * ...Soon to be followed by `pull`, `push` and `search`. 
Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- builtins/builtins.go | 4 +++ registry/registry.go | 39 ++++++++++++++++++++++++++ registry/service.go | 54 ++++++++++++++++++++++++++++++++++++ server/server.go | 66 ++------------------------------------------ 4 files changed, 100 insertions(+), 63 deletions(-) create mode 100644 registry/service.go diff --git a/builtins/builtins.go b/builtins/builtins.go index 374bd48701..bd3b33d0d3 100644 --- a/builtins/builtins.go +++ b/builtins/builtins.go @@ -4,12 +4,16 @@ import ( api "github.com/dotcloud/docker/api/server" "github.com/dotcloud/docker/daemon/networkdriver/bridge" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/server" ) func Register(eng *engine.Engine) { daemon(eng) remote(eng) + // FIXME: engine.Installer.Install can fail. These errors + // should be passed up. + registry.NewService().Install(eng) } // remote: a RESTful api for cross-docker communication diff --git a/registry/registry.go b/registry/registry.go index 1bd73cdeb5..55154e364b 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -13,10 +13,12 @@ import ( "net/http/cookiejar" "net/url" "regexp" + "runtime" "strconv" "strings" "time" + "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/utils" ) @@ -757,3 +759,40 @@ func NewRegistry(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, inde r.reqFactory = factory return r, nil } + +func HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory { + // FIXME: this replicates the 'info' job. + httpVersion := make([]utils.VersionInfo, 0, 4) + httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION}) + httpVersion = append(httpVersion, &simpleVersionInfo{"go", runtime.Version()}) + httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT}) + if kernelVersion, err := utils.GetKernelVersion(); err == nil { + httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()}) + } + httpVersion = append(httpVersion, &simpleVersionInfo{"os", runtime.GOOS}) + httpVersion = append(httpVersion, &simpleVersionInfo{"arch", runtime.GOARCH}) + ud := utils.NewHTTPUserAgentDecorator(httpVersion...) + md := &utils.HTTPMetaHeadersDecorator{ + Headers: metaHeaders, + } + factory := utils.NewHTTPRequestFactory(ud, md) + return factory +} + +// simpleVersionInfo is a simple implementation of +// the interface VersionInfo, which is used +// to provide version information for some product, +// component, etc. It stores the product name and the version +// in string and returns them on calls to Name() and Version(). +type simpleVersionInfo struct { + name string + version string +} + +func (v *simpleVersionInfo) Name() string { + return v.name +} + +func (v *simpleVersionInfo) Version() string { + return v.version +} diff --git a/registry/service.go b/registry/service.go new file mode 100644 index 0000000000..530a7f7afe --- /dev/null +++ b/registry/service.go @@ -0,0 +1,54 @@ +package registry + +import ( + "github.com/dotcloud/docker/engine" +) + +// Service exposes registry capabilities in the standard Engine +// interface. 
Once installed, it extends the engine with the +// following calls: +// +// 'auth': Authenticate against the public registry +// 'search': Search for images on the public registry (TODO) +// 'pull': Download images from any registry (TODO) +// 'push': Upload images to any registry (TODO) +type Service struct { +} + +// NewService returns a new instance of Service ready to be +// installed no an engine. +func NewService() *Service { + return &Service{} +} + +// Install installs registry capabilities to eng. +func (s *Service) Install(eng *engine.Engine) error { + eng.Register("auth", s.Auth) + return nil +} + +// Auth contacts the public registry with the provided credentials, +// and returns OK if authentication was sucessful. +// It can be used to verify the validity of a client's credentials. +func (s *Service) Auth(job *engine.Job) engine.Status { + var ( + err error + authConfig = &AuthConfig{} + ) + + job.GetenvJson("authConfig", authConfig) + // TODO: this is only done here because auth and registry need to be merged into one pkg + if addr := authConfig.ServerAddress; addr != "" && addr != IndexServerAddress() { + addr, err = ExpandAndVerifyRegistryUrl(addr) + if err != nil { + return job.Error(err) + } + authConfig.ServerAddress = addr + } + status, err := Login(authConfig, HTTPRequestFactory(nil)) + if err != nil { + return job.Error(err) + } + job.Printf("%s\n", status) + return engine.StatusOK +} diff --git a/server/server.go b/server/server.go index f55107d3bd..16f9129311 100644 --- a/server/server.go +++ b/server/server.go @@ -139,7 +139,6 @@ func InitServer(job *engine.Job) engine.Status { "events": srv.Events, "push": srv.ImagePush, "containers": srv.Containers, - "auth": srv.Auth, } { if err := job.Eng.Register(name, handler); err != nil { return job.Error(err) @@ -148,24 +147,6 @@ func InitServer(job *engine.Job) engine.Status { return engine.StatusOK } -// simpleVersionInfo is a simple implementation of -// the interface VersionInfo, which is used -// to provide version information for some product, -// component, etc. It stores the product name and the version -// in string and returns them on calls to Name() and Version(). -type simpleVersionInfo struct { - name string - version string -} - -func (v *simpleVersionInfo) Name() string { - return v.name -} - -func (v *simpleVersionInfo) Version() string { - return v.version -} - // ContainerKill send signal to the container // If no signal is given (sig 0), then Kill with SIGKILL and wait // for the container to exit. 
@@ -215,29 +196,6 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status { return engine.StatusOK } -func (srv *Server) Auth(job *engine.Job) engine.Status { - var ( - err error - authConfig = ®istry.AuthConfig{} - ) - - job.GetenvJson("authConfig", authConfig) - // TODO: this is only done here because auth and registry need to be merged into one pkg - if addr := authConfig.ServerAddress; addr != "" && addr != registry.IndexServerAddress() { - addr, err = registry.ExpandAndVerifyRegistryUrl(addr) - if err != nil { - return job.Error(err) - } - authConfig.ServerAddress = addr - } - status, err := registry.Login(authConfig, srv.HTTPRequestFactory(nil)) - if err != nil { - return job.Error(err) - } - job.Printf("%s\n", status) - return engine.StatusOK -} - func (srv *Server) Events(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s FROM", job.Name) @@ -654,7 +612,7 @@ func (srv *Server) ImagesSearch(job *engine.Job) engine.Status { job.GetenvJson("authConfig", authConfig) job.GetenvJson("metaHeaders", metaHeaders) - r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), registry.IndexServerAddress()) + r, err := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), registry.IndexServerAddress()) if err != nil { return job.Error(err) } @@ -1457,7 +1415,7 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status { return job.Error(err) } - r, err := registry.NewRegistry(&authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) + r, err := registry.NewRegistry(&authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint) if err != nil { return job.Error(err) } @@ -1680,7 +1638,7 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status { } img, err := srv.daemon.Graph().Get(localName) - r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) + r, err2 := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint) if err2 != nil { return job.Error(err2) } @@ -2558,24 +2516,6 @@ func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) return srv, nil } -func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory { - httpVersion := make([]utils.VersionInfo, 0, 4) - httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION}) - httpVersion = append(httpVersion, &simpleVersionInfo{"go", goruntime.Version()}) - httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT}) - if kernelVersion, err := utils.GetKernelVersion(); err == nil { - httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()}) - } - httpVersion = append(httpVersion, &simpleVersionInfo{"os", goruntime.GOOS}) - httpVersion = append(httpVersion, &simpleVersionInfo{"arch", goruntime.GOARCH}) - ud := utils.NewHTTPUserAgentDecorator(httpVersion...) 
- md := &utils.HTTPMetaHeadersDecorator{ - Headers: metaHeaders, - } - factory := utils.NewHTTPRequestFactory(ud, md) - return factory -} - func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage { now := time.Now().UTC().Unix() jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now} From c4089ad80bcc1466535696ac0b11d388df529391 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Sun, 27 Apr 2014 15:21:42 -0700 Subject: [PATCH 172/219] Move 'search' to the registry subsystem This continues the effort to separate all registry logic from the deprecated `Server` object. * 'search' is exposed by `github.com/dotcloud/docker/registry/Service` * Added proper documentation of Search while I was at it Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- registry/service.go | 52 ++++++++++++++++++++++++++++++++++++++++++++- server/server.go | 34 ----------------------------- 2 files changed, 51 insertions(+), 35 deletions(-) diff --git a/registry/service.go b/registry/service.go index 530a7f7afe..1c7a93deac 100644 --- a/registry/service.go +++ b/registry/service.go @@ -9,7 +9,7 @@ import ( // following calls: // // 'auth': Authenticate against the public registry -// 'search': Search for images on the public registry (TODO) +// 'search': Search for images on the public registry // 'pull': Download images from any registry (TODO) // 'push': Upload images to any registry (TODO) type Service struct { @@ -24,6 +24,7 @@ func NewService() *Service { // Install installs registry capabilities to eng. func (s *Service) Install(eng *engine.Engine) error { eng.Register("auth", s.Auth) + eng.Register("search", s.Search) return nil } @@ -52,3 +53,52 @@ func (s *Service) Auth(job *engine.Job) engine.Status { job.Printf("%s\n", status) return engine.StatusOK } + +// Search queries the public registry for images matching the specified +// search terms, and returns the results. +// +// Argument syntax: search TERM +// +// Option environment: +// 'authConfig': json-encoded credentials to authenticate against the registry. +// The search extends to images only accessible via the credentials. +// +// 'metaHeaders': extra HTTP headers to include in the request to the registry. +// The headers should be passed as a json-encoded dictionary. +// +// Output: +// Results are sent as a collection of structured messages (using engine.Table). +// Each result is sent as a separate message. +// Results are ordered by number of stars on the public registry. 
+func (s *Service) Search(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s TERM", job.Name) + } + var ( + term = job.Args[0] + metaHeaders = map[string][]string{} + authConfig = &AuthConfig{} + ) + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("metaHeaders", metaHeaders) + + r, err := NewRegistry(authConfig, HTTPRequestFactory(metaHeaders), IndexServerAddress()) + if err != nil { + return job.Error(err) + } + results, err := r.SearchRepositories(term) + if err != nil { + return job.Error(err) + } + outs := engine.NewTable("star_count", 0) + for _, result := range results.Results { + out := &engine.Env{} + out.Import(result) + outs.Add(out) + } + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} diff --git a/server/server.go b/server/server.go index 16f9129311..04cc17a35a 100644 --- a/server/server.go +++ b/server/server.go @@ -126,7 +126,6 @@ func InitServer(job *engine.Job) engine.Status { "insert": srv.ImageInsert, "attach": srv.ContainerAttach, "logs": srv.ContainerLogs, - "search": srv.ImagesSearch, "changes": srv.ContainerChanges, "top": srv.ContainerTop, "version": srv.DockerVersion, @@ -600,39 +599,6 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error { return nil } -func (srv *Server) ImagesSearch(job *engine.Job) engine.Status { - if n := len(job.Args); n != 1 { - return job.Errorf("Usage: %s TERM", job.Name) - } - var ( - term = job.Args[0] - metaHeaders = map[string][]string{} - authConfig = ®istry.AuthConfig{} - ) - job.GetenvJson("authConfig", authConfig) - job.GetenvJson("metaHeaders", metaHeaders) - - r, err := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), registry.IndexServerAddress()) - if err != nil { - return job.Error(err) - } - results, err := r.SearchRepositories(term) - if err != nil { - return job.Error(err) - } - outs := engine.NewTable("star_count", 0) - for _, result := range results.Results { - out := &engine.Env{} - out.Import(result) - outs.Add(out) - } - outs.ReverseSort() - if _, err := outs.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - // FIXME: 'insert' is deprecated and should be removed in a future version. func (srv *Server) ImageInsert(job *engine.Job) engine.Status { fmt.Fprintf(job.Stderr, "Warning: '%s' is deprecated and will be removed in a future version. Please use 'build' and 'ADD' instead.\n", job.Name) From 328d65dcff423b14e76f03ee65445032da31ed42 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Sat, 3 May 2014 00:54:52 +0000 Subject: [PATCH 173/219] remove fixme Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- builtins/builtins.go | 26 +++++++++++++++----------- docker/docker.go | 4 +++- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/builtins/builtins.go b/builtins/builtins.go index bd3b33d0d3..40d421f154 100644 --- a/builtins/builtins.go +++ b/builtins/builtins.go @@ -8,17 +8,19 @@ import ( "github.com/dotcloud/docker/server" ) -func Register(eng *engine.Engine) { - daemon(eng) - remote(eng) - // FIXME: engine.Installer.Install can fail. These errors - // should be passed up. 
- registry.NewService().Install(eng) +func Register(eng *engine.Engine) error { + if err := daemon(eng); err != nil { + return err + } + if err := remote(eng); err != nil { + return err + } + return registry.NewService().Install(eng) } // remote: a RESTful api for cross-docker communication -func remote(eng *engine.Engine) { - eng.Register("serveapi", api.ServeApi) +func remote(eng *engine.Engine) error { + return eng.Register("serveapi", api.ServeApi) } // daemon: a default execution and storage backend for Docker on Linux, @@ -36,7 +38,9 @@ func remote(eng *engine.Engine) { // // These components should be broken off into plugins of their own. // -func daemon(eng *engine.Engine) { - eng.Register("initserver", server.InitServer) - eng.Register("init_networkdriver", bridge.InitDriver) +func daemon(eng *engine.Engine) error { + if err := eng.Register("initserver", server.InitServer); err != nil { + return err + } + return eng.Register("init_networkdriver", bridge.InitDriver) } diff --git a/docker/docker.go b/docker/docker.go index 7c366001b7..db33341413 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -128,7 +128,9 @@ func main() { eng := engine.New() // Load builtins - builtins.Register(eng) + if err := builtins.Register(eng); err != nil { + log.Fatal(err) + } // load the daemon in the background so we can immediately start // the http api so that connections don't fail while the daemon // is booting From dca1c0073f42b0d75e914119eae863d6e6087cd6 Mon Sep 17 00:00:00 2001 From: Mateusz Sulima Date: Sat, 3 May 2014 12:22:33 +0200 Subject: [PATCH 174/219] hello_world.md - $container_id variable case sensitivity If you run the tutorial step-by-step, following error occurs: ```$ sudo docker logs $container_id Usage: docker logs CONTAINER Fetch the logs of a container -f, --follow=false: Follow log output``` This is obviously because bash variables are case-sensitive, so it mustn't be `CONTAINER_ID` above. Docker-DCO-1.1-Signed-off-by: Mateusz Sulima (github: github_handle) --- docs/sources/examples/hello_world.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/examples/hello_world.md b/docs/sources/examples/hello_world.md index 48f4a43102..177857816c 100644 --- a/docs/sources/examples/hello_world.md +++ b/docs/sources/examples/hello_world.md @@ -80,7 +80,7 @@ continue to do this until we stop it. **Steps:** - $ CONTAINER_ID=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done") + $ container_id=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done") We are going to run a simple hello world daemon in a new container made from the `ubuntu` image. From bfac0b24ed66277c66807466e9d429624b1179e6 Mon Sep 17 00:00:00 2001 From: James Turnbull Date: Sun, 4 May 2014 03:16:21 +0200 Subject: [PATCH 175/219] Fixed a couple of single dashes in links document Docker-DCO-1.1-Signed-off-by: James Turnbull (github: jamtur01) --- docs/sources/use/working_with_links_names.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/sources/use/working_with_links_names.md b/docs/sources/use/working_with_links_names.md index 40260feabf..dab66cef06 100644 --- a/docs/sources/use/working_with_links_names.md +++ b/docs/sources/use/working_with_links_names.md @@ -50,7 +50,7 @@ For example, there is an image called `crosbymichael/redis` that exposes the port 6379 and starts the Redis server. Let's name the container as `redis` based on that image and run it as daemon. 
- $ sudo docker run -d -name redis crosbymichael/redis + $ sudo docker run -d --name redis crosbymichael/redis We can issue all the commands that you would expect using the name `redis`; start, stop, attach, using the name for our container. The name also allows @@ -61,9 +61,9 @@ apply a link to connect both containers. If you noticed when running our Redis server we did not use the `-p` flag to publish the Redis port to the host system. Redis exposed port 6379 and this is all we need to establish a link. - $ sudo docker run -t -i -link redis:db -name webapp ubuntu bash + $ sudo docker run -t -i --link redis:db --name webapp ubuntu bash -When you specified `-link redis:db` you are telling Docker to link the +When you specified `--link redis:db` you are telling Docker to link the container named `redis` into this new container with the alias `db`. Environment variables are prefixed with the alias so that the parent container can access network and environment information from the containers that are From 8d7ed2cae49918c9f31e9fd068b28c8e114e939b Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Sat, 3 May 2014 20:34:21 -0600 Subject: [PATCH 176/219] Update vendored deps that have a proper version number to use said specific versions Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/vendor.sh | 4 ++-- .../coreos/go-systemd/dbus/methods_test.go | 3 ++- vendor/src/github.com/coreos/go-systemd/dbus/set.go | 7 +++++++ .../github.com/coreos/go-systemd/dbus/set_test.go | 13 +++++++++++++ .../go-systemd/fixtures/enable-disable.service | 5 +++++ 5 files changed, 29 insertions(+), 3 deletions(-) create mode 100644 vendor/src/github.com/coreos/go-systemd/fixtures/enable-disable.service diff --git a/hack/vendor.sh b/hack/vendor.sh index 4200d90867..79322cd9af 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -59,5 +59,5 @@ rm -rf src/code.google.com/p/go mkdir -p src/code.google.com/p/go/src/pkg/archive mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar -clone git github.com/godbus/dbus cb98efbb933d8389ab549a060e880ea3c375d213 -clone git github.com/coreos/go-systemd 4c14ed39b8a643ac44b4f95b5a53c00e94261475 +clone git github.com/godbus/dbus v1 +clone git github.com/coreos/go-systemd v1 diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go index 9e2f22323f..d943e7ebfc 100644 --- a/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go +++ b/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go @@ -18,12 +18,13 @@ package dbus import ( "fmt" - "github.com/guelfey/go.dbus" "math/rand" "os" "path/filepath" "reflect" "testing" + + "github.com/godbus/dbus" ) func setupConn(t *testing.T) *Conn { diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/set.go b/vendor/src/github.com/coreos/go-systemd/dbus/set.go index 88378b29a1..45ad1fb399 100644 --- a/vendor/src/github.com/coreos/go-systemd/dbus/set.go +++ b/vendor/src/github.com/coreos/go-systemd/dbus/set.go @@ -21,6 +21,13 @@ func (s *set) Length() (int) { return len(s.data) } +func (s *set) Values() (values []string) { + for val, _ := range s.data { + values = append(values, val) + } + return +} + func newSet() (*set) { return &set{make(map[string] bool)} } diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/set_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/set_test.go index d8d174d0c4..c4435f8800 100644 --- a/vendor/src/github.com/coreos/go-systemd/dbus/set_test.go +++ 
b/vendor/src/github.com/coreos/go-systemd/dbus/set_test.go @@ -18,9 +18,22 @@ func TestBasicSetActions(t *testing.T) { t.Fatal("set should contain 'foo'") } + v := s.Values() + if len(v) != 1 { + t.Fatal("set.Values did not report correct number of values") + } + if v[0] != "foo" { + t.Fatal("set.Values did not report value") + } + s.Remove("foo") if s.Contains("foo") { t.Fatal("set should not contain 'foo'") } + + v = s.Values() + if len(v) != 0 { + t.Fatal("set.Values did not report correct number of values") + } } diff --git a/vendor/src/github.com/coreos/go-systemd/fixtures/enable-disable.service b/vendor/src/github.com/coreos/go-systemd/fixtures/enable-disable.service new file mode 100644 index 0000000000..74c9459088 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/fixtures/enable-disable.service @@ -0,0 +1,5 @@ +[Unit] +Description=enable disable test + +[Service] +ExecStart=/bin/sleep 400 From 6799d14cb8cb9986d4a38473cddd009b96e717c8 Mon Sep 17 00:00:00 2001 From: lukemarsden Date: Sun, 4 May 2014 17:52:48 +0100 Subject: [PATCH 177/219] Update devenvironment.md `git clone` should use `https` URL. --- docs/sources/contributing/devenvironment.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/contributing/devenvironment.md b/docs/sources/contributing/devenvironment.md index bcefa00369..24e250dbb0 100644 --- a/docs/sources/contributing/devenvironment.md +++ b/docs/sources/contributing/devenvironment.md @@ -32,7 +32,7 @@ Again, you can do it in other ways but you need to do more work. ## Check out the Source - $ git clone http://git@github.com/dotcloud/docker + $ git clone https://git@github.com/dotcloud/docker $ cd docker To checkout a different revision just use `git checkout` From a304dcef00d639b2f5dbf8d7561f1f8de7124573 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Mon, 5 May 2014 12:54:10 +0200 Subject: [PATCH 178/219] nat: Fix --expose protocol parsing A command like: docker run --expose 5353/tcp -P fedora sleep 10 Currently fails with: Error: Cannot start container 5c558de5f0bd85ff14e13e3691aefbe531346297a27d4b3562732baa8785b34a: unknown protocol This is because nat.SplitProtoPort() confuses the order of the port and proto in 5353/tcp, assuming the protocol is first. However, in all other places in docker the protocol is last, so the fix is just to swap these. 
Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- nat/nat.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nat/nat.go b/nat/nat.go index f3af362f8b..7aad775d70 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -69,7 +69,7 @@ func SplitProtoPort(rawPort string) (string, string) { if l == 1 { return "tcp", rawPort } - return parts[0], parts[1] + return parts[1], parts[0] } // We will receive port specs in the format of ip:public:private/proto and these need to be From 46755dfc1aa30a418a11dc8e352c600ef365b969 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Mon, 5 May 2014 10:38:44 +1000 Subject: [PATCH 179/219] Rearrange the existing info a little, and add example style guide Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/README.md | 55 +++++++++++++++++++++++++------------------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/docs/README.md b/docs/README.md index bbc741d593..47b390bda4 100755 --- a/docs/README.md +++ b/docs/README.md @@ -1,8 +1,4 @@ -Docker Documentation -==================== - -Overview --------- +# Docker Documentation The source for Docker documentation is here under `sources/` and uses extended Markdown, as implemented by [mkdocs](http://mkdocs.org). @@ -37,8 +33,13 @@ may include features not yet part of any official docker release. The development and `docs.docker.io` (which points to the `docs` branch`) should be used for the latest official release. -Getting Started ---------------- +## Contributing + +- Follow the contribution guidelines ([see + `../CONTRIBUTING.md`](../CONTRIBUTING.md)). +- [Remember to sign your work!](../CONTRIBUTING.md#sign-your-work) + +## Getting Started Docker documentation builds are done in a Docker container, which installs all the required tools, adds the local `docs/` directory and @@ -47,40 +48,40 @@ you can connect and see your changes. In the root of the `docker` source directory: - cd docker - -Run: - make docs If you have any issues you need to debug, you can use `make docs-shell` and then run `mkdocs serve` -# Contributing +### Examples -* Follow the contribution guidelines ([see - `../CONTRIBUTING.md`](../CONTRIBUTING.md)). -* [Remember to sign your work!](../CONTRIBUTING.md#sign-your-work) +When writing examples give the user hints by making them resemble what +they see in their shell: -Working using GitHub's file editor ----------------------------------- +- Indent shell examples by 4 spaces so they get rendered as code. +- Start typed commands with `$ ` (dollar space), so that they are easily +differentiated from program output. +- Program output has no prefix. +- Comments begin with `# ` (hash space). +- In-container shell commands begin with `$$ ` (dollar dollar space). -Alternatively, for small changes and typos you might want to use -GitHub's built in file editor. It allows you to preview your changes -right on-line (though there can be some differences between GitHub -Markdown and mkdocs Markdown). Just be careful not to create many commits. -And you must still [sign your work!](../CONTRIBUTING.md#sign-your-work) - -Images ------- +### Images When you need to add images, try to make them as small as possible (e.g. as gifs). Usually images should go in the same directory as the `.md` file which references them, or in a subdirectory if one already exists. 
-Publishing Documentation ------------------------- +## Working using GitHub's file editor + +Alternatively, for small changes and typos you might want to use +GitHub's built in file editor. It allows you to preview your changes +right on-line (though there can be some differences between GitHub +Markdown and [MkDocs Markdown](http://www.mkdocs.org/user-guide/writing-your-docs/)). +Just be careful not to create many commits. And you must still +[sign your work!](../CONTRIBUTING.md#sign-your-work) + +## Publishing Documentation To publish a copy of the documentation you need a `docs/awsconfig` file containing AWS settings to deploy to. The release script will From e625bad3d7e5eb08568cbd09d9d574b8c63fd203 Mon Sep 17 00:00:00 2001 From: Dan Walsh Date: Fri, 2 May 2014 16:47:29 -0400 Subject: [PATCH 180/219] Fix docker man page to reference selinux-enable flag Docker-DCO-1.1-Signed-off-by: Daniel Walsh (github: rhatdan) --- contrib/man/md/docker.1.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/contrib/man/md/docker.1.md b/contrib/man/md/docker.1.md index 02c541262b..d1ddf192b5 100644 --- a/contrib/man/md/docker.1.md +++ b/contrib/man/md/docker.1.md @@ -23,7 +23,7 @@ its own man page which explain usage and arguements. To see the man page for a command run **man docker **. # OPTIONS -**-D**=*ture*|*false* +**-D**=*true*|*false* Enable debug mode. Default is false. **-H**, **--host**=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or @@ -73,6 +73,9 @@ port=[4243] or path =[/var/run/docker.sock] is omitted, default values are used. **-v**=*true*|*false* Print version information and quit. Default is false. +**--selinux-enabled=*true*|*false* + Enable selinux support. Default is false. + # COMMANDS **docker-attach(1)** Attach to a running container From 56d71ae79bbbdd0d1eb97408c6132687a33ec113 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Mon, 5 May 2014 22:52:12 +1000 Subject: [PATCH 181/219] Several reader issues fixed - Fix boot2docker url - move HomeBrew instructions to a separate section - fix docker client 5-liner to work (its still ugly) - fix and update program output Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/installation/mac.md | 65 ++++++++++++++------------------ 1 file changed, 29 insertions(+), 36 deletions(-) diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md index 15736f5c6c..d5b65cd5ab 100644 --- a/docs/sources/installation/mac.md +++ b/docs/sources/installation/mac.md @@ -28,22 +28,14 @@ Once the download is complete, open the disk image, run the set up file (i.e. `VirtualBox.pkg`) and install VirtualBox. Do not simply copy the package without running the installer. -### boot2docker +### Manual Installation +#### boot2docker [boot2docker](https://github.com/boot2docker/boot2docker) provides a -handy script to easily manage the VM running the `docker` +handy script to manage the VM running the `docker` daemon. It also takes care of the installation for the OS image that is used for the job. -#### With Homebrew - -If you are using Homebrew on your machine, simply run the following -command to install `boot2docker`: - - $ brew install boot2docker - -#### Manual installation - Open up a new terminal window, if you have not already. 
Run the following commands to get boot2docker: @@ -52,33 +44,23 @@ Run the following commands to get boot2docker: $ cd ~/bin # Get the file - $ curl https://raw.github.com/boot2docker/boot2docker/master/boot2docker > boot2docker + $ curl https://raw.githubusercontent.com/boot2docker/boot2docker/master/boot2docker > boot2docker # Mark it executable $ chmod +x boot2docker -### Docker OS X Client +#### Docker OS X Client -The `docker` daemon is accessed using the -`docker` client. - -#### With Homebrew - -Run the following command to install the `docker` -client: - - $ brew install docker - -#### Manual installation +The `docker` daemon is accessed using the `docker` client. Run the following commands to get it downloaded and set up: # Get the docker client file $ DIR=$(mktemp -d ${TMPDIR:-/tmp}/dockerdl.XXXXXXX) && \ - $ curl -f -o $DIR/ld.tgz https://get.docker.io/builds/Darwin/x86_64/docker-latest.tgz && \ - $ gunzip $DIR/ld.tgz && \ - $ tar xvf $DIR/ld.tar -C $DIR/ && \ - $ cp $DIR/usr/local/bin/docker ./docker + curl -f -o $DIR/ld.tgz https://get.docker.io/builds/Darwin/x86_64/docker-latest.tgz && \ + gunzip $DIR/ld.tgz && \ + tar xvf $DIR/ld.tar -C $DIR/ && \ + cp $DIR/usr/local/bin/docker ./docker # Set the environment variable for the docker daemon $ export DOCKER_HOST=tcp://127.0.0.1:4243 @@ -87,6 +69,18 @@ Run the following commands to get it downloaded and set up: $ sudo mkdir -p /usr/local/bin $ sudo cp docker /usr/local/bin/ +### (OR) With Homebrew + +If you are using Homebrew on your machine, simply run the following +command to install `boot2docker`: + + $ brew install boot2docker + +Run the following command to install the `docker` +client: + + $ brew install docker + And that's it! Let's check out how to use it. ## How To Use Docker On Mac OS X @@ -104,8 +98,7 @@ commands: # To see all available commands: $ ./boot2docker - - # Usage ./boot2docker {init|start|up|pause|stop|restart|status|info|delete|ssh|download} + Usage ./boot2docker {init|start|up|pause|stop|restart|status|info|delete|ssh|download} ### The `docker` client @@ -114,12 +107,12 @@ use the `docker` client just like any other application. $ docker version - # Client version: 0.7.6 - # Go version (client): go1.2 - # Git commit (client): bc3b2ec - # Server version: 0.7.5 - # Git commit (server): c348c04 - # Go version (server): go1.2 + Client version: 0.10.0 + Client API version: 1.10 + Server version: 0.10.0 + Server API version: 1.10 + Last stable version: 0.10.0 + ### Forwarding VM Port Range to Host From 10766e1fb46770a407b2b17ade313dd5ae85054e Mon Sep 17 00:00:00 2001 From: Aaron Huslage Date: Mon, 5 May 2014 10:28:52 -0400 Subject: [PATCH 182/219] Post-commit hook URL fix Updating CONTRIBUTING to include the correct URL for the post-commit hook. 
Docker-DCO-1.1-Signed-off-by: Aaron Huslage (github: huslage) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5a1ad4b0ab..d77afbc443 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -82,7 +82,7 @@ editors have plugins that do this automatically, and there's also a git pre-commit hook: ``` -curl -o .git/hooks/pre-commit https://raw.github.com/edsrzf/gofmt-git-hook/master/fmt-check && chmod +x .git/hooks/pre-commit +curl -o .git/hooks/pre-commit https://raw.githubusercontent.com/edsrzf/gofmt-git-hook/master/fmt-check && chmod +x .git/hooks/pre-commit ``` Pull requests descriptions should be as clear as possible and include a From c34bb099e51300f4355f949cc514bab8370c82c1 Mon Sep 17 00:00:00 2001 From: James Turnbull Date: Mon, 5 May 2014 16:42:23 +0200 Subject: [PATCH 183/219] Fixed the horrible OSX installation docs Docker-DCO-1.1-Signed-off-by: James Turnbull (github: jamtur01) --- docs/sources/installation/mac.md | 169 +++++++++++++++++-------------- 1 file changed, 92 insertions(+), 77 deletions(-) diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md index d5b65cd5ab..c30e0b6440 100644 --- a/docs/sources/installation/mac.md +++ b/docs/sources/installation/mac.md @@ -1,12 +1,8 @@ -page_title: Installation on Mac OS X 10.6 Snow Leopard -page_description: Please note this project is currently under heavy development. It should not be used in production. -page_keywords: Docker, Docker documentation, requirements, virtualbox, ssh, linux, os x, osx, mac +page_title: Installation on Mac OS X +page_description: Instructions for installing Docker on OS X using boot2docker. +page_keywords: Docker, Docker documentation, requirements, boot2docker, VirtualBox, SSH, Linux, OSX, OS X, Mac -# Mac OS X - -> **Note**: -> These instructions are available with the new release of Docker (version -> 0.8). However, they are subject to change. +# Installing Docker on Mac OS X > **Note**: > Docker is still under heavy development! We don't recommend using it in @@ -14,33 +10,50 @@ page_keywords: Docker, Docker documentation, requirements, virtualbox, ssh, linu > our blog post, [Getting to Docker 1.0]( > http://blog.docker.io/2013/08/getting-to-docker-1-0/) -Docker is supported on Mac OS X 10.6 "Snow Leopard" or newer. +> **Note:** +> Docker is supported on Mac OS X 10.6 "Snow Leopard" or newer. -## How To Install Docker On Mac OS X +Docker has two key components: the Docker daemon and the `docker` binary +which acts as a client. The client passes instructions to the daemon +which builds, runs and manages your Docker containers. As Docker uses +some Linux-specific kernel features you can't use it directly on OS X. +Instead we run the Docker daemon inside a lightweight virtual machine on your local +OS X host. We can then use a native client `docker` binary to communicate +with the Docker daemon inside our virtual machine. To make this process +easier we've designed a helper application called +[boot2docker](https://github.com/boot2docker/boot2docker) to install +that virtual machine and run our Docker daemon. -### VirtualBox +[boot2docker](https://github.com/boot2docker/boot2docker) uses +VirtualBox to create the virtual machine so we'll need to install that +first. + +## Installing VirtualBox Docker on OS X needs VirtualBox to run. To begin with, head over to [VirtualBox Download Page](https://www.virtualbox.org/wiki/Downloads) and get the tool for `OS X hosts x86/amd64`. 
-Once the download is complete, open the disk image, run the set up file -(i.e. `VirtualBox.pkg`) and install VirtualBox. Do -not simply copy the package without running the installer. +Once the download is complete, open the disk image, run `VirtualBox.pkg` +and install VirtualBox. -### Manual Installation -#### boot2docker +> **Note**: +> Do not simply copy the package without running the +> installer. + +## Installing boot2docker + +### Installing manually [boot2docker](https://github.com/boot2docker/boot2docker) provides a -handy script to manage the VM running the `docker` -daemon. It also takes care of the installation for the OS -image that is used for the job. +handy script to manage the VM running the Docker daemon. It also takes +care of the installation of that VM. -Open up a new terminal window, if you have not already. - -Run the following commands to get boot2docker: +Open up a new terminal window and run the following commands to get +boot2docker: # Enter the installation directory + $ mkdir -p ~/bin $ cd ~/bin # Get the file @@ -49,62 +62,69 @@ Run the following commands to get boot2docker: # Mark it executable $ chmod +x boot2docker -#### Docker OS X Client +### Installing the Docker OS X Client -The `docker` daemon is accessed using the `docker` client. +The Docker daemon is accessed using the `docker` binary. Run the following commands to get it downloaded and set up: - # Get the docker client file + # Get the docker binary $ DIR=$(mktemp -d ${TMPDIR:-/tmp}/dockerdl.XXXXXXX) && \ curl -f -o $DIR/ld.tgz https://get.docker.io/builds/Darwin/x86_64/docker-latest.tgz && \ gunzip $DIR/ld.tgz && \ tar xvf $DIR/ld.tar -C $DIR/ && \ cp $DIR/usr/local/bin/docker ./docker - # Set the environment variable for the docker daemon - $ export DOCKER_HOST=tcp://127.0.0.1:4243 - # Copy the executable file $ sudo mkdir -p /usr/local/bin $ sudo cp docker /usr/local/bin/ -### (OR) With Homebrew +### Configure the Docker OS X Client + +The Docker client, `docker`, uses an environment variable `DOCKER_HOST` +to specify the location of the Docker daemon to connect to. Specify your +local boot2docker virtual machine as the value of that variable. + + $ export DOCKER_HOST=tcp://127.0.0.1:4243 + +## Installing boot2docker with Homebrew If you are using Homebrew on your machine, simply run the following command to install `boot2docker`: $ brew install boot2docker -Run the following command to install the `docker` -client: +Run the following command to install the Docker client: $ brew install docker And that's it! Let's check out how to use it. -## How To Use Docker On Mac OS X +# How To Use Docker On Mac OS X -### The `docker` daemon (via boot2docker) +## Running the Docker daemon via boot2docker -Inside the `~/bin` directory, run the following -commands: +Firstly we need to initialize our boot2docker virtual machine. Run the +`boot2docker` command. - # Initiate the VM - $ ./boot2docker init + $ boot2docker init - # Run the VM (the docker daemon) - $ ./boot2docker up +This will setup our initial virtual machine. - # To see all available commands: - $ ./boot2docker +Next we need to start the Docker daemon. + + $ boot2docker up + +There are a variety of others commands available using the `boot2docker` +script. You can see these like so: + + $ boot2docker Usage ./boot2docker {init|start|up|pause|stop|restart|status|info|delete|ssh|download} -### The `docker` client +## The Docker client -Once the VM with the `docker` daemon is up, you can -use the `docker` client just like any other -application. 
+Once the virtual machine with the Docker daemon is up, you can use the `docker` +binary just like any other application. $ docker version Client version: 0.10.0 @@ -113,20 +133,23 @@ application. Server API version: 1.10 Last stable version: 0.10.0 +## Using Docker port forwarding with boot2docker -### Forwarding VM Port Range to Host +In order to forward network ports from Docker with boot2docker we need to +manually forward the port range Docker uses inside VirtualBox. To do +this we take the port range that Docker uses by default with the `-P` +option, ports 49000-49900, and run the following command. -If we take the port range that docker uses by default with the -P option -(49000-49900), and forward same range from host to vm, we'll be able to -interact with our containers as if they were running locally: +> **Note:** +> The boot2docker virtual machine must be powered off for this +> to work. - # vm must be powered off for i in {49000..49900}; do VBoxManage modifyvm "boot2docker-vm" --natpf1 "tcp-port$i,tcp,,$i,,$i"; VBoxManage modifyvm "boot2docker-vm" --natpf1 "udp-port$i,udp,,$i,,$i"; done -### SSH-ing The VM +## Connecting to the VM via SSH If you feel the need to connect to the VM, you can simply run: @@ -135,37 +158,29 @@ If you feel the need to connect to the VM, you can simply run: # User: docker # Pwd: tcuser -You can now continue with the [*Hello -World*](/examples/hello_world/#hello-world) example. +If SSH complains about keys then run: -## Learn More + $ ssh-keygen -R '[localhost]:2022' -### boot2docker: +## Upgrading to a newer release of boot2docker + +To upgrade an initialized boot2docker virtual machine, you can use the +following 3 commands. Your virtual machine's disk will not be changed, +so you won't lose your images and containers: + + $ boot2docker stop + $ boot2docker download + $ boot2docker start + +# Learn More + +## boot2docker See the GitHub page for [boot2docker](https://github.com/boot2docker/boot2docker). -### If SSH complains about keys: +# Next steps - $ ssh-keygen -R '[localhost]:2022' +You can now continue with the [*Hello +World*](/examples/hello_world/#hello-world) example. -### Upgrading to a newer release of boot2docker - -To upgrade an initialised VM, you can use the following 3 commands. Your -persistence disk will not be changed, so you won't lose your images and -containers: - - $ ./boot2docker stop - $ ./boot2docker download - $ ./boot2docker start - -### About the way Docker works on Mac OS X: - -Docker has two key components: the `docker` daemon and the `docker` client. -The tool works by client commanding the daemon. In order to work and do its -magic, the daemon makes use of some Linux Kernel features (e.g. LXC, name -spaces etc.), which are not supported by OS X. Therefore, the solution of -getting Docker to run on OS X consists of running it inside a lightweight -virtual machine. In order to simplify things, Docker comes with a bash -script to make this whole process as easy as possible (i.e. -boot2docker). 
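Taken together, the rewritten page boils down to a short shell session. A minimal sketch of the manual (non-Homebrew) flow, assuming the default daemon port forward of 4243 and that `boot2docker` ends up on your `PATH`:

    # Install boot2docker and make it executable
    $ mkdir -p ~/bin && cd ~/bin
    $ curl https://raw.githubusercontent.com/boot2docker/boot2docker/master/boot2docker > boot2docker
    $ chmod +x boot2docker

    # Create and start the VM that runs the Docker daemon
    $ boot2docker init
    $ boot2docker up

    # Point the client at the daemon inside the VM and verify the connection
    $ export DOCKER_HOST=tcp://127.0.0.1:4243
    $ docker version
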
From a60159f3b102244fc5470642bd32eb99d5ac329c Mon Sep 17 00:00:00 2001 From: Johan Euphrosine Date: Wed, 30 Apr 2014 15:46:56 -0700 Subject: [PATCH 184/219] runconfig: add -net container:name option Docker-DCO-1.1-Signed-off-by: Johan Euphrosine (github: proppy) --- daemon/container.go | 15 +++++++-- daemon/execdriver/driver.go | 5 +-- daemon/execdriver/native/create.go | 16 +++++++++ runconfig/hostconfig.go | 24 ++++++++------ runconfig/parse.go | 53 +++++++++++++++++++++--------- 5 files changed, 84 insertions(+), 29 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index 1c6dc077dc..22c2ef3abe 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -325,7 +325,7 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s }) } -func populateCommand(c *Container, env []string) { +func populateCommand(c *Container, env []string) error { var ( en *execdriver.Network context = make(map[string][]string) @@ -351,6 +351,14 @@ func populateCommand(c *Container, env []string) { // TODO: this can be removed after lxc-conf is fully deprecated mergeLxcConfIntoOptions(c.hostConfig, context) + if netContainer := c.hostConfig.UseContainerNetwork; netContainer != "" { + nc := c.daemon.Get(netContainer) + if nc == nil { + return fmt.Errorf("no such container to join network: %q", netContainer) + } + en.ContainerID = nc.ID + } + resources := &execdriver.Resources{ Memory: c.Config.Memory, MemorySwap: c.Config.MemorySwap, @@ -372,6 +380,7 @@ func populateCommand(c *Container, env []string) { } c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true} c.command.Env = env + return nil } func (container *Container) Start() (err error) { @@ -415,7 +424,9 @@ func (container *Container) Start() (err error) { if err := container.setupWorkingDirectory(); err != nil { return err } - populateCommand(container, env) + if err := populateCommand(container, env); err != nil { + return err + } if err := setupMountsForContainer(container); err != nil { return err } diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go index 27a575cb3a..994a27e501 100644 --- a/daemon/execdriver/driver.go +++ b/daemon/execdriver/driver.go @@ -89,8 +89,9 @@ type Driver interface { // Network settings of the container type Network struct { - Interface *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled - Mtu int `json:"mtu"` + Interface *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled + Mtu int `json:"mtu"` + ContainerID string `json:"container_id"` // id of the container to join network. } type NetworkInterface struct { diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index 5562d08986..b2dd395bb5 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -3,6 +3,7 @@ package native import ( "fmt" "os" + "path/filepath" "github.com/dotcloud/docker/daemon/execdriver" "github.com/dotcloud/docker/daemon/execdriver/native/configuration" @@ -75,6 +76,21 @@ func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver. 
} container.Networks = append(container.Networks, &vethNetwork) } + + if c.Network.ContainerID != "" { + cmd := d.activeContainers[c.Network.ContainerID] + if cmd == nil || cmd.Process == nil { + return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID) + } + nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net") + container.Networks = append(container.Networks, &libcontainer.Network{ + Type: "netns", + Context: libcontainer.Context{ + "nspath": nspath, + }, + }) + } + return nil } diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index 3235bf1f4e..dce88c4460 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -7,16 +7,17 @@ import ( ) type HostConfig struct { - Binds []string - ContainerIDFile string - LxcConf []utils.KeyValuePair - Privileged bool - PortBindings nat.PortMap - Links []string - PublishAllPorts bool - Dns []string - DnsSearch []string - VolumesFrom []string + Binds []string + ContainerIDFile string + LxcConf []utils.KeyValuePair + Privileged bool + PortBindings nat.PortMap + Links []string + PublishAllPorts bool + Dns []string + DnsSearch []string + VolumesFrom []string + UseContainerNetwork string } func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { @@ -42,5 +43,8 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil { hostConfig.VolumesFrom = VolumesFrom } + if UseContainerNetwork := job.Getenv("UseContainerNetwork"); UseContainerNetwork != "" { + hostConfig.UseContainerNetwork = UseContainerNetwork + } return hostConfig } diff --git a/runconfig/parse.go b/runconfig/parse.go index d395b49e80..06e380d4fa 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -2,14 +2,15 @@ package runconfig import ( "fmt" + "io/ioutil" + "path" + "strings" + "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/opts" flag "github.com/dotcloud/docker/pkg/mflag" "github.com/dotcloud/docker/pkg/sysinfo" "github.com/dotcloud/docker/utils" - "io/ioutil" - "path" - "strings" ) var ( @@ -61,7 +62,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") - + flNetMode = cmd.String([]string{"#net", "-net"}, "bridge", "Set the Network mode for the container ('bridge': creates a new network stack for the container on the docker bridge, 'disable': disable networking for this container, 'container:name_or_id': reuses another container network stack)") // For documentation purpose _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") _ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container") @@ -197,6 +198,11 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf // boo, there's no debug output for docker run //utils.Debugf("Environment variables for the container: %#v", envVariables) + netMode, useContainerNetwork, err := parseNetMode(*flNetMode) + if err != nil { + return nil, nil, cmd, fmt.Errorf("-net: invalid net mode: %v", err) + } + config := &Config{ Hostname: hostname, Domainname: domainname, @@ -204,7 +210,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf ExposedPorts: ports, 
User: *flUser, Tty: *flTty, - NetworkDisabled: !*flNetwork, + NetworkDisabled: !*flNetwork || netMode == "disable", OpenStdin: *flStdin, Memory: flMemory, CpuShares: *flCpuShares, @@ -220,16 +226,17 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf } hostConfig := &HostConfig{ - Binds: binds, - ContainerIDFile: *flContainerIDFile, - LxcConf: lxcConf, - Privileged: *flPrivileged, - PortBindings: portBindings, - Links: flLinks.GetAll(), - PublishAllPorts: *flPublishAll, - Dns: flDns.GetAll(), - DnsSearch: flDnsSearch.GetAll(), - VolumesFrom: flVolumesFrom.GetAll(), + Binds: binds, + ContainerIDFile: *flContainerIDFile, + LxcConf: lxcConf, + Privileged: *flPrivileged, + PortBindings: portBindings, + Links: flLinks.GetAll(), + PublishAllPorts: *flPublishAll, + Dns: flDns.GetAll(), + DnsSearch: flDnsSearch.GetAll(), + VolumesFrom: flVolumesFrom.GetAll(), + UseContainerNetwork: useContainerNetwork, } if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { @@ -274,3 +281,19 @@ func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) { } return out, nil } + +func parseNetMode(netMode string) (string, string, error) { + parts := strings.Split(netMode, ":") + if len(parts) < 1 { + return "", "", fmt.Errorf("'netmode' cannot be empty", netMode) + } + mode := parts[0] + var container string + if mode == "container" { + if len(parts) < 2 { + return "", "", fmt.Errorf("'container:' netmode requires a container id or name", netMode) + } + container = parts[1] + } + return mode, container, nil +} From 7118416aeeb779373685d192c26a329e9acdef89 Mon Sep 17 00:00:00 2001 From: Johan Euphrosine Date: Fri, 2 May 2014 01:47:12 -0700 Subject: [PATCH 185/219] runconfig/parse: add test for parseNetMode Docker-DCO-1.1-Signed-off-by: Johan Euphrosine (github: proppy) --- runconfig/parse.go | 17 +++++++++-------- runconfig/parse_test.go | 33 ++++++++++++++++++++++++++++++++- 2 files changed, 41 insertions(+), 9 deletions(-) diff --git a/runconfig/parse.go b/runconfig/parse.go index 06e380d4fa..262a04eb4b 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -284,16 +284,17 @@ func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) { func parseNetMode(netMode string) (string, string, error) { parts := strings.Split(netMode, ":") - if len(parts) < 1 { - return "", "", fmt.Errorf("'netmode' cannot be empty", netMode) - } - mode := parts[0] - var container string - if mode == "container" { - if len(parts) < 2 { + switch mode := parts[0]; mode { + case "bridge", "disable": + return mode, "", nil + case "container": + var container string + if len(parts) < 2 || parts[1] == "" { return "", "", fmt.Errorf("'container:' netmode requires a container id or name", netMode) } container = parts[1] + return mode, container, nil + default: + return "", "", fmt.Errorf("invalid netmode: %q", netMode) } - return mode, container, nil } diff --git a/runconfig/parse_test.go b/runconfig/parse_test.go index fd28c4593e..9ac925f2ac 100644 --- a/runconfig/parse_test.go +++ b/runconfig/parse_test.go @@ -1,8 +1,9 @@ package runconfig import ( - "github.com/dotcloud/docker/utils" "testing" + + "github.com/dotcloud/docker/utils" ) func TestParseLxcConfOpt(t *testing.T) { @@ -21,3 +22,33 @@ func TestParseLxcConfOpt(t *testing.T) { } } } + +func TestParseNetMode(t *testing.T) { + testFlags := []struct { + flag string + mode string + container string + err bool + }{ + {"", "", "", true}, + {"bridge", "bridge", "", false}, + {"disable", "disable", "", false}, + {"container:foo", 
"container", "foo", false}, + {"container:", "", "", true}, + {"container", "", "", true}, + {"unknown", "", "", true}, + } + + for _, to := range testFlags { + mode, container, err := parseNetMode(to.flag) + if mode != to.mode { + t.Fatalf("-net %s: expected net mode: %q, got: %q", to.flag, to.mode, mode) + } + if container != to.container { + t.Fatalf("-net %s: expected net container: %q, got: %q", to.flag, to.container, container) + } + if (err != nil) != to.err { + t.Fatal("-net %s: expected an error got none", to.flag) + } + } +} From 2c2cc051d831f54d1bb070642edcd876ff669e78 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 2 May 2014 14:06:05 -0700 Subject: [PATCH 186/219] Update --net flags and container mode Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/container.go | 35 ++++++++++++++++++-------------- runconfig/hostconfig.go | 26 +++++++++++------------- runconfig/parse.go | 44 ++++++++++++++++++++--------------------- runconfig/parse_test.go | 2 +- 4 files changed, 54 insertions(+), 53 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index 22c2ef3abe..bbd4aa6a58 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -338,27 +338,32 @@ func populateCommand(c *Container, env []string) error { Interface: nil, } - if !c.Config.NetworkDisabled { - network := c.NetworkSettings - en.Interface = &execdriver.NetworkInterface{ - Gateway: network.Gateway, - Bridge: network.Bridge, - IPAddress: network.IPAddress, - IPPrefixLen: network.IPPrefixLen, + parts := strings.SplitN(c.hostConfig.NetworkMode, ":", 2) + switch parts[0] { + case "none": + case "bridge": + if !c.Config.NetworkDisabled { + network := c.NetworkSettings + en.Interface = &execdriver.NetworkInterface{ + Gateway: network.Gateway, + Bridge: network.Bridge, + IPAddress: network.IPAddress, + IPPrefixLen: network.IPPrefixLen, + } } + case "container": + nc := c.daemon.Get(parts[1]) + if nc == nil { + return fmt.Errorf("no such container to join network: %q", parts[1]) + } + en.ContainerID = nc.ID + default: + return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode) } // TODO: this can be removed after lxc-conf is fully deprecated mergeLxcConfIntoOptions(c.hostConfig, context) - if netContainer := c.hostConfig.UseContainerNetwork; netContainer != "" { - nc := c.daemon.Get(netContainer) - if nc == nil { - return fmt.Errorf("no such container to join network: %q", netContainer) - } - en.ContainerID = nc.ID - } - resources := &execdriver.Resources{ Memory: c.Config.Memory, MemorySwap: c.Config.MemorySwap, diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index dce88c4460..83688367e3 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -7,17 +7,17 @@ import ( ) type HostConfig struct { - Binds []string - ContainerIDFile string - LxcConf []utils.KeyValuePair - Privileged bool - PortBindings nat.PortMap - Links []string - PublishAllPorts bool - Dns []string - DnsSearch []string - VolumesFrom []string - UseContainerNetwork string + Binds []string + ContainerIDFile string + LxcConf []utils.KeyValuePair + Privileged bool + PortBindings nat.PortMap + Links []string + PublishAllPorts bool + Dns []string + DnsSearch []string + VolumesFrom []string + NetworkMode string } func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { @@ -25,6 +25,7 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { ContainerIDFile: job.Getenv("ContainerIDFile"), Privileged: job.GetenvBool("Privileged"), PublishAllPorts: 
job.GetenvBool("PublishAllPorts"), + NetworkMode: job.Getenv("NetworkMode"), } job.GetenvJson("LxcConf", &hostConfig.LxcConf) job.GetenvJson("PortBindings", &hostConfig.PortBindings) @@ -43,8 +44,5 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil { hostConfig.VolumesFrom = VolumesFrom } - if UseContainerNetwork := job.Getenv("UseContainerNetwork"); UseContainerNetwork != "" { - hostConfig.UseContainerNetwork = UseContainerNetwork - } return hostConfig } diff --git a/runconfig/parse.go b/runconfig/parse.go index 262a04eb4b..42ad5c1958 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -50,7 +50,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id") - flNetwork = cmd.Bool([]string{"n", "-networking"}, true, "Enable networking for this container") + flNetwork = cmd.Bool([]string{"#n", "#-networking"}, true, "Enable networking for this container") flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container") flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to the host interfaces") flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep stdin open even if not attached") @@ -62,7 +62,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") - flNetMode = cmd.String([]string{"#net", "-net"}, "bridge", "Set the Network mode for the container ('bridge': creates a new network stack for the container on the docker bridge, 'disable': disable networking for this container, 'container:name_or_id': reuses another container network stack)") + flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container ('bridge': creates a new network stack for the container on the docker bridge, 'none': no networking for this container, 'container:name_or_id': reuses another container network stack)") // For documentation purpose _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") _ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container") @@ -198,7 +198,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf // boo, there's no debug output for docker run //utils.Debugf("Environment variables for the container: %#v", envVariables) - netMode, useContainerNetwork, err := parseNetMode(*flNetMode) + netMode, err := parseNetMode(*flNetMode) if err != nil { return nil, nil, cmd, fmt.Errorf("-net: invalid net mode: %v", err) } @@ -210,7 +210,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf ExposedPorts: ports, User: *flUser, Tty: *flTty, - NetworkDisabled: !*flNetwork || netMode == "disable", + NetworkDisabled: !*flNetwork, OpenStdin: *flStdin, Memory: flMemory, CpuShares: *flCpuShares, @@ -226,17 +226,17 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf 
} hostConfig := &HostConfig{ - Binds: binds, - ContainerIDFile: *flContainerIDFile, - LxcConf: lxcConf, - Privileged: *flPrivileged, - PortBindings: portBindings, - Links: flLinks.GetAll(), - PublishAllPorts: *flPublishAll, - Dns: flDns.GetAll(), - DnsSearch: flDnsSearch.GetAll(), - VolumesFrom: flVolumesFrom.GetAll(), - UseContainerNetwork: useContainerNetwork, + Binds: binds, + ContainerIDFile: *flContainerIDFile, + LxcConf: lxcConf, + Privileged: *flPrivileged, + PortBindings: portBindings, + Links: flLinks.GetAll(), + PublishAllPorts: *flPublishAll, + Dns: flDns.GetAll(), + DnsSearch: flDnsSearch.GetAll(), + VolumesFrom: flVolumesFrom.GetAll(), + NetworkMode: netMode, } if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { @@ -282,19 +282,17 @@ func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) { return out, nil } -func parseNetMode(netMode string) (string, string, error) { +func parseNetMode(netMode string) (string, error) { parts := strings.Split(netMode, ":") switch mode := parts[0]; mode { - case "bridge", "disable": - return mode, "", nil + case "bridge", "none": + return mode, nil case "container": - var container string if len(parts) < 2 || parts[1] == "" { - return "", "", fmt.Errorf("'container:' netmode requires a container id or name", netMode) + return "", fmt.Errorf("'container:' netmode requires a container id or name", netMode) } - container = parts[1] - return mode, container, nil + return netMode, nil default: - return "", "", fmt.Errorf("invalid netmode: %q", netMode) + return "", fmt.Errorf("invalid netmode: %q", netMode) } } diff --git a/runconfig/parse_test.go b/runconfig/parse_test.go index 9ac925f2ac..e1b4cf9f93 100644 --- a/runconfig/parse_test.go +++ b/runconfig/parse_test.go @@ -40,7 +40,7 @@ func TestParseNetMode(t *testing.T) { } for _, to := range testFlags { - mode, container, err := parseNetMode(to.flag) + mode, err := parseNetMode(to.flag) if mode != to.mode { t.Fatalf("-net %s: expected net mode: %q, got: %q", to.flag, to.mode, mode) } From a785882b29b9f0b24ace8249576c5d8d7f8c1d94 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 2 May 2014 14:17:31 -0700 Subject: [PATCH 187/219] Setup host networking for lxc and native Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/container.go | 2 ++ daemon/execdriver/driver.go | 7 ++++--- daemon/execdriver/lxc/init.go | 9 +++++---- daemon/execdriver/lxc/lxc_template.go | 5 +++-- daemon/execdriver/native/create.go | 5 ++++- runconfig/parse.go | 2 ++ 6 files changed, 20 insertions(+), 10 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index bbd4aa6a58..6769da9b25 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -341,6 +341,8 @@ func populateCommand(c *Container, env []string) error { parts := strings.SplitN(c.hostConfig.NetworkMode, ":", 2) switch parts[0] { case "none": + case "host": + en.HostNetworking = true case "bridge": if !c.Config.NetworkDisabled { network := c.NetworkSettings diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go index 994a27e501..4837a398ea 100644 --- a/daemon/execdriver/driver.go +++ b/daemon/execdriver/driver.go @@ -89,9 +89,10 @@ type Driver interface { // Network settings of the container type Network struct { - Interface *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled - Mtu int `json:"mtu"` - ContainerID string `json:"container_id"` // id of the container to join network. 
+ Interface *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled + Mtu int `json:"mtu"` + ContainerID string `json:"container_id"` // id of the container to join network. + HostNetworking bool `json:"host_networking"` } type NetworkInterface struct { diff --git a/daemon/execdriver/lxc/init.go b/daemon/execdriver/lxc/init.go index 52d75fc9f8..e21e717645 100644 --- a/daemon/execdriver/lxc/init.go +++ b/daemon/execdriver/lxc/init.go @@ -3,15 +3,16 @@ package lxc import ( "encoding/json" "fmt" - "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/pkg/netlink" - "github.com/dotcloud/docker/pkg/user" - "github.com/syndtr/gocapability/capability" "io/ioutil" "net" "os" "strings" "syscall" + + "github.com/dotcloud/docker/daemon/execdriver" + "github.com/dotcloud/docker/pkg/netlink" + "github.com/dotcloud/docker/pkg/user" + "github.com/syndtr/gocapability/capability" ) // Clear environment pollution introduced by lxc-start diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go index 19fa43c4c2..7fdc5ce92b 100644 --- a/daemon/execdriver/lxc/lxc_template.go +++ b/daemon/execdriver/lxc/lxc_template.go @@ -14,12 +14,13 @@ const LxcTemplate = ` lxc.network.type = veth lxc.network.link = {{.Network.Interface.Bridge}} lxc.network.name = eth0 -{{else}} +lxc.network.mtu = {{.Network.Mtu}} +{{else if not .Network.HostNetworking}} # network is disabled (-n=false) lxc.network.type = empty lxc.network.flags = up -{{end}} lxc.network.mtu = {{.Network.Mtu}} +{{end}} # root filesystem {{$ROOTFS := .Rootfs}} diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index b2dd395bb5..5070ef7838 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -53,6 +53,10 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container } func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.Command) error { + if c.Network.HostNetworking { + container.Namespaces.Get("NEWNET").Enabled = false + return nil + } container.Networks = []*libcontainer.Network{ { Mtu: c.Network.Mtu, @@ -90,7 +94,6 @@ func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver. 
}, }) } - return nil } diff --git a/runconfig/parse.go b/runconfig/parse.go index 42ad5c1958..eb9886bb97 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -292,6 +292,8 @@ func parseNetMode(netMode string) (string, error) { return "", fmt.Errorf("'container:' netmode requires a container id or name", netMode) } return netMode, nil + case "host": + return netMode, nil default: return "", fmt.Errorf("invalid netmode: %q", netMode) } From 5ca6532011436eee85ccb555a0832a82450454ea Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 2 May 2014 14:45:39 -0700 Subject: [PATCH 188/219] Update host networking with hostname and files Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/container.go | 43 +++++++++++++++++++++++++++++++++++------ runconfig/parse_test.go | 30 ---------------------------- 2 files changed, 37 insertions(+), 36 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index 6769da9b25..9a08f87133 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -343,7 +343,7 @@ func populateCommand(c *Container, env []string) error { case "none": case "host": en.HostNetworking = true - case "bridge": + case "bridge", "": // empty string to support existing containers if !c.Config.NetworkDisabled { network := c.NetworkSettings en.Interface = &execdriver.NetworkInterface{ @@ -503,9 +503,18 @@ func (container *Container) StderrLogPipe() io.ReadCloser { return utils.NewBufReader(reader) } -func (container *Container) buildHostnameAndHostsFiles(IP string) { +func (container *Container) buildHostname() { container.HostnamePath = path.Join(container.root, "hostname") - ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) + + if container.Config.Domainname != "" { + ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644) + } else { + ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) + } +} + +func (container *Container) buildHostnameAndHostsFiles(IP string) { + container.buildHostname() hostsContent := []byte(` 127.0.0.1 localhost @@ -523,12 +532,11 @@ ff02::2 ip6-allrouters } else if !container.Config.NetworkDisabled { hostsContent = append([]byte(fmt.Sprintf("%s\t%s\n", IP, container.Config.Hostname)), hostsContent...) 
} - ioutil.WriteFile(container.HostsPath, hostsContent, 0644) } func (container *Container) allocateNetwork() error { - if container.Config.NetworkDisabled { + if container.Config.NetworkDisabled || container.hostConfig.NetworkMode == "host" { return nil } @@ -981,14 +989,22 @@ func (container *Container) setupContainerDns() error { if container.ResolvConfPath != "" { return nil } + var ( config = container.hostConfig daemon = container.daemon ) + + if config.NetworkMode == "host" { + container.ResolvConfPath = "/etc/resolv.conf" + return nil + } + resolvConf, err := utils.GetResolvConf() if err != nil { return err } + // If custom dns exists, then create a resolv.conf for the container if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 { var ( @@ -1028,7 +1044,22 @@ func (container *Container) setupContainerDns() error { } func (container *Container) initializeNetworking() error { - if container.daemon.config.DisableNetwork { + var err error + if container.hostConfig.NetworkMode == "host" { + container.Config.Hostname, err = os.Hostname() + if err != nil { + return err + } + + parts := strings.SplitN(container.Config.Hostname, ".", 2) + if len(parts) > 1 { + container.Config.Hostname = parts[0] + container.Config.Domainname = parts[1] + } + container.HostsPath = "/etc/hosts" + + container.buildHostname() + } else if container.daemon.config.DisableNetwork { container.Config.NetworkDisabled = true container.buildHostnameAndHostsFiles("127.0.1.1") } else { diff --git a/runconfig/parse_test.go b/runconfig/parse_test.go index e1b4cf9f93..8ad40b9d2d 100644 --- a/runconfig/parse_test.go +++ b/runconfig/parse_test.go @@ -22,33 +22,3 @@ func TestParseLxcConfOpt(t *testing.T) { } } } - -func TestParseNetMode(t *testing.T) { - testFlags := []struct { - flag string - mode string - container string - err bool - }{ - {"", "", "", true}, - {"bridge", "bridge", "", false}, - {"disable", "disable", "", false}, - {"container:foo", "container", "foo", false}, - {"container:", "", "", true}, - {"container", "", "", true}, - {"unknown", "", "", true}, - } - - for _, to := range testFlags { - mode, err := parseNetMode(to.flag) - if mode != to.mode { - t.Fatalf("-net %s: expected net mode: %q, got: %q", to.flag, to.mode, mode) - } - if container != to.container { - t.Fatalf("-net %s: expected net container: %q, got: %q", to.flag, to.container, container) - } - if (err != nil) != to.err { - t.Fatal("-net %s: expected an error got none", to.flag) - } - } -} From c1c6b3ccd915084bc9472992afa16f677a074785 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 2 May 2014 15:32:26 -0700 Subject: [PATCH 189/219] Add docs for --net flag Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- docs/sources/reference/run.md | 46 +++++++++++++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 0125394d4f..521e8010e2 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -136,8 +136,8 @@ PID files): ## Network Settings - -n=true : Enable networking for this container - --dns=[] : Set custom dns servers for the container + --dns=[] : Set custom dns servers for the container + --net=bridge : Set the network mode By default, all containers have networking enabled and they can make any outgoing connections. The operator can completely disable networking @@ -148,6 +148,48 @@ files or STDIN/STDOUT only. 
Your container will use the same DNS servers as the host by default, but you can override this with `--dns`. +Supported networking modes are: + +* none - no networking in the container +* bridge - (default) connect the container to the bridge via veth interfaces +* host - use the host's network stack inside the container +* container - use another container's network stack + +#### Mode: none +With the networking mode set to `none` a container will not have a access to +any external routes. The container will still have a `loopback` interface +enabled in the container but it does not have any routes to external traffic. + +#### Mode: bridge +With the networking mode set to `bridge` a container will use docker's default +networking setup. A bridge is setup on the host, commonly named `docker0`, +and a pair of veth interfaces will be created for the container. One side of +the veth pair will remain on the host attached to the bridge while the other +side of the pair will be placed inside the container's namespaces in addition +to the `loopback` interface. An IP address will be allocated for containers +on the bridge's network and trafic will be routed though this bridge to the +container. + +#### Mode: host +With the networking mode set to `host` a container will share the host's +network stack and all interfaces from the host will be available to the +container. The container's hostname will match the hostname on the host +system. Publishing ports and linking to other containers will not work +when sharing the host's network stack. + +#### Mode: container +With the networking mode set to `container` a container will share the +network stack of another container. The other container's name must be +provided in the format of `--net container:`. + +Example running a redis container with redis binding to localhost then +running the redis-cli and connecting to the redis server over the +localhost interface. + + $ docker run -d --name redis example/redis --bind 127.0.0.1 + $ # use the redis container's network stack to access localhost + $ docker run --rm -ti --net container:redis example/redis-cli -h 127.0.0.1 + ## Clean Up (–rm) By default a container's file system persists even after the container From 0b187b909be1dac60194250bc6e9ff292a0bd5c9 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 2 May 2014 16:59:28 -0700 Subject: [PATCH 190/219] Address code review feedback Also make sure we copy the joining containers hosts and resolv.conf with the hostname if we are joining it's network stack. 
Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/container.go | 40 ++++++++++++++++++++++++++++++++++------ runconfig/hostconfig.go | 17 +++++++++++++++-- runconfig/parse.go | 17 +++++++---------- 3 files changed, 56 insertions(+), 18 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index 9a08f87133..123eca0263 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -338,7 +338,7 @@ func populateCommand(c *Container, env []string) error { Interface: nil, } - parts := strings.SplitN(c.hostConfig.NetworkMode, ":", 2) + parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2) switch parts[0] { case "none": case "host": @@ -354,9 +354,9 @@ func populateCommand(c *Container, env []string) error { } } case "container": - nc := c.daemon.Get(parts[1]) - if nc == nil { - return fmt.Errorf("no such container to join network: %q", parts[1]) + nc, err := c.getNetworkedContainer() + if err != nil { + return err } en.ContainerID = nc.ID default: @@ -536,7 +536,8 @@ ff02::2 ip6-allrouters } func (container *Container) allocateNetwork() error { - if container.Config.NetworkDisabled || container.hostConfig.NetworkMode == "host" { + mode := container.hostConfig.NetworkMode + if container.Config.NetworkDisabled || mode.IsContainer() || mode.IsHost() { return nil } @@ -1045,7 +1046,7 @@ func (container *Container) setupContainerDns() error { func (container *Container) initializeNetworking() error { var err error - if container.hostConfig.NetworkMode == "host" { + if container.hostConfig.NetworkMode.IsHost() { container.Config.Hostname, err = os.Hostname() if err != nil { return err @@ -1059,6 +1060,16 @@ func (container *Container) initializeNetworking() error { container.HostsPath = "/etc/hosts" container.buildHostname() + } else if container.hostConfig.NetworkMode.IsContainer() { + // we need to get the hosts files from the container to join + nc, err := container.getNetworkedContainer() + if err != nil { + return err + } + container.HostsPath = nc.HostsPath + container.ResolvConfPath = nc.ResolvConfPath + container.Config.Hostname = nc.Config.Hostname + container.Config.Domainname = nc.Config.Domainname } else if container.daemon.config.DisableNetwork { container.Config.NetworkDisabled = true container.buildHostnameAndHostsFiles("127.0.1.1") @@ -1268,3 +1279,20 @@ func (container *Container) GetMountLabel() string { } return container.MountLabel } + +func (container *Container) getNetworkedContainer() (*Container, error) { + parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2) + switch parts[0] { + case "container": + nc := container.daemon.Get(parts[1]) + if nc == nil { + return nil, fmt.Errorf("no such container to join network: %s", parts[1]) + } + if !nc.State.IsRunning() { + return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1]) + } + return nc, nil + default: + return nil, fmt.Errorf("network mode not set to container") + } +} diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index 83688367e3..79ffad723b 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -1,11 +1,24 @@ package runconfig import ( + "strings" + "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/utils" ) +type NetworkMode string + +func (n NetworkMode) IsHost() bool { + return n == "host" +} + +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + type 
HostConfig struct { Binds []string ContainerIDFile string @@ -17,7 +30,7 @@ type HostConfig struct { Dns []string DnsSearch []string VolumesFrom []string - NetworkMode string + NetworkMode NetworkMode } func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { @@ -25,7 +38,7 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { ContainerIDFile: job.Getenv("ContainerIDFile"), Privileged: job.GetenvBool("Privileged"), PublishAllPorts: job.GetenvBool("PublishAllPorts"), - NetworkMode: job.Getenv("NetworkMode"), + NetworkMode: NetworkMode(job.Getenv("NetworkMode")), } job.GetenvJson("LxcConf", &hostConfig.LxcConf) job.GetenvJson("PortBindings", &hostConfig.PortBindings) diff --git a/runconfig/parse.go b/runconfig/parse.go index eb9886bb97..0d511ef2ec 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -62,7 +62,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") - flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container ('bridge': creates a new network stack for the container on the docker bridge, 'none': no networking for this container, 'container:name_or_id': reuses another container network stack)") + flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container ('bridge': creates a new network stack for the container on the docker bridge, 'none': no networking for this container, 'container:': reuses another container network stack)") // For documentation purpose _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") _ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container") @@ -200,7 +200,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf netMode, err := parseNetMode(*flNetMode) if err != nil { - return nil, nil, cmd, fmt.Errorf("-net: invalid net mode: %v", err) + return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err) } config := &Config{ @@ -282,19 +282,16 @@ func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) { return out, nil } -func parseNetMode(netMode string) (string, error) { +func parseNetMode(netMode string) (NetworkMode, error) { parts := strings.Split(netMode, ":") switch mode := parts[0]; mode { - case "bridge", "none": - return mode, nil + case "bridge", "none", "host": case "container": if len(parts) < 2 || parts[1] == "" { - return "", fmt.Errorf("'container:' netmode requires a container id or name", netMode) + return "", fmt.Errorf("invalid container format container:") } - return netMode, nil - case "host": - return netMode, nil default: - return "", fmt.Errorf("invalid netmode: %q", netMode) + return "", fmt.Errorf("invalid --net: %s", netMode) } + return NetworkMode(netMode), nil } From 7da186c3e58c83605f0389d1214909b1cbbead39 Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 5 May 2014 20:29:20 +0300 Subject: [PATCH 191/219] import sha512 to make sha512 ssl certs work Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- registry/registry.go | 1 + 1 file changed, 1 insertion(+) diff --git a/registry/registry.go b/registry/registry.go index 1bd73cdeb5..28b28c2b5c 100644 --- a/registry/registry.go +++ 
b/registry/registry.go @@ -3,6 +3,7 @@ package registry import ( "bytes" "crypto/sha256" + _ "crypto/sha512" "encoding/json" "errors" "fmt" From f2d7c77c638040f64af6f968044f12e44653859d Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 5 May 2014 10:47:55 -0700 Subject: [PATCH 192/219] Add alex as devmapper and btrfs maintainer Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/graphdriver/btrfs/MAINTAINERS | 1 + daemon/graphdriver/devmapper/MAINTAINERS | 1 + 2 files changed, 2 insertions(+) create mode 100644 daemon/graphdriver/btrfs/MAINTAINERS create mode 100644 daemon/graphdriver/devmapper/MAINTAINERS diff --git a/daemon/graphdriver/btrfs/MAINTAINERS b/daemon/graphdriver/btrfs/MAINTAINERS new file mode 100644 index 0000000000..9e629d5fcc --- /dev/null +++ b/daemon/graphdriver/btrfs/MAINTAINERS @@ -0,0 +1 @@ +Alexander Larsson (@alexlarsson) diff --git a/daemon/graphdriver/devmapper/MAINTAINERS b/daemon/graphdriver/devmapper/MAINTAINERS new file mode 100644 index 0000000000..9e629d5fcc --- /dev/null +++ b/daemon/graphdriver/devmapper/MAINTAINERS @@ -0,0 +1 @@ +Alexander Larsson (@alexlarsson) From 5b094530c09bca403819c06635c2f7fbaf98b937 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Wed, 23 Apr 2014 11:00:12 +0200 Subject: [PATCH 193/219] cgroups: Update systemd to match fs backend This updates systemd.Apply to match the fs backend by: * Always join blockio controller (for stats) * Support CpusetCpus * Support MemorySwap Also, it removes the generic UnitProperties in favour of a single option to set the slice. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- .../execdriver/native/configuration/parse.go | 11 ++ pkg/cgroups/cgroups.go | 2 +- pkg/cgroups/systemd/apply_nosystemd.go | 2 +- pkg/cgroups/systemd/apply_systemd.go | 151 ++++++++++++++++-- 4 files changed, 153 insertions(+), 13 deletions(-) diff --git a/daemon/execdriver/native/configuration/parse.go b/daemon/execdriver/native/configuration/parse.go index c3846af910..94af91aab1 100644 --- a/daemon/execdriver/native/configuration/parse.go +++ b/daemon/execdriver/native/configuration/parse.go @@ -27,6 +27,8 @@ var actions = map[string]Action{ "cgroups.memory_swap": memorySwap, // set the memory swap limit "cgroups.cpuset.cpus": cpusetCpus, // set the cpus used + "systemd.slice": systemdSlice, // set parent Slice used for systemd unit + "apparmor_profile": apparmorProfile, // set the apparmor profile to apply "fs.readonly": readonlyFs, // make the rootfs of the container read only @@ -41,6 +43,15 @@ func cpusetCpus(container *libcontainer.Container, context interface{}, value st return nil } +func systemdSlice(container *libcontainer.Container, context interface{}, value string) error { + if container.Cgroups == nil { + return fmt.Errorf("cannot set slice when cgroups are disabled") + } + container.Cgroups.Slice = value + + return nil +} + func apparmorProfile(container *libcontainer.Container, context interface{}, value string) error { container.Context["apparmor_profile"] = value return nil diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go index 86623845ae..0f93320725 100644 --- a/pkg/cgroups/cgroups.go +++ b/pkg/cgroups/cgroups.go @@ -22,7 +22,7 @@ type Cgroup struct { CpusetCpus string `json:"cpuset_cpus,omitempty"` // CPU to use Freezer string `json:"freezer,omitempty"` // set the freeze value for the process - UnitProperties [][2]string `json:"unit_properties,omitempty"` // systemd unit properties + Slice string `json:"slice,omitempty"` // Parent 
slice to use for systemd } type ActiveCgroup interface { diff --git a/pkg/cgroups/systemd/apply_nosystemd.go b/pkg/cgroups/systemd/apply_nosystemd.go index 226aa59f9d..4faa749745 100644 --- a/pkg/cgroups/systemd/apply_nosystemd.go +++ b/pkg/cgroups/systemd/apply_nosystemd.go @@ -11,6 +11,6 @@ func UseSystemd() bool { return false } -func systemdApply(c *Cgroup, pid int) (cgroups.ActiveCgroup, error) { +func Apply(c *Cgroup, pid int) (cgroups.ActiveCgroup, error) { return nil, fmt.Errorf("Systemd not supported") } diff --git a/pkg/cgroups/systemd/apply_systemd.go b/pkg/cgroups/systemd/apply_systemd.go index e1246f6e70..12dede9581 100644 --- a/pkg/cgroups/systemd/apply_systemd.go +++ b/pkg/cgroups/systemd/apply_systemd.go @@ -3,9 +3,10 @@ package systemd import ( - "fmt" "io/ioutil" + "os" "path/filepath" + "strconv" "strings" "sync" @@ -16,6 +17,7 @@ import ( ) type systemdCgroup struct { + cleanupDirs []string } type DeviceAllow struct { @@ -69,20 +71,42 @@ func getIfaceForUnit(unitName string) string { return "Unit" } +type cgroupArg struct { + File string + Value string +} + func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { var ( unitName = c.Parent + "-" + c.Name + ".scope" slice = "system.slice" properties []systemd1.Property + cpuArgs []cgroupArg + cpusetArgs []cgroupArg + memoryArgs []cgroupArg + res systemdCgroup ) - for _, v := range c.UnitProperties { - switch v[0] { - case "Slice": - slice = v[1] - default: - return nil, fmt.Errorf("Unknown unit propery %s", v[0]) + // First set up things not supported by systemd + + // -1 disables memorySwap + if c.MemorySwap >= 0 && (c.Memory != 0 || c.MemorySwap > 0) { + memorySwap := c.MemorySwap + + if memorySwap == 0 { + // By default, MemorySwap is set to twice the size of RAM. + memorySwap = c.Memory * 2 } + + memoryArgs = append(memoryArgs, cgroupArg{"memory.memsw.limit_in_bytes", strconv.FormatInt(memorySwap, 10)}) + } + + if c.CpusetCpus != "" { + cpusetArgs = append(cpusetArgs, cgroupArg{"cpuset.cpus", c.CpusetCpus}) + } + + if c.Slice != "" { + slice = c.Slice } properties = append(properties, @@ -111,11 +135,12 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { })}) } - // Always enable accounting, this gets us the same behaviour as the raw implementation, + // Always enable accounting, this gets us the same behaviour as the fs implementation, // plus the kernel has some problems with joining the memory cgroup at a later time. 
properties = append(properties, systemd1.Property{"MemoryAccounting", dbus.MakeVariant(true)}, - systemd1.Property{"CPUAccounting", dbus.MakeVariant(true)}) + systemd1.Property{"CPUAccounting", dbus.MakeVariant(true)}, + systemd1.Property{"BlockIOAccounting", dbus.MakeVariant(true)}) if c.Memory != 0 { properties = append(properties, @@ -162,10 +187,114 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { return nil, err } } - return &systemdCgroup{}, nil + + if len(cpuArgs) != 0 { + mountpoint, err := cgroups.FindCgroupMountpoint("cpu") + if err != nil { + return nil, err + } + + path := filepath.Join(mountpoint, cgroup) + + for _, arg := range cpuArgs { + if err := ioutil.WriteFile(filepath.Join(path, arg.File), []byte(arg.Value), 0700); err != nil { + return nil, err + } + } + } + + if len(memoryArgs) != 0 { + mountpoint, err := cgroups.FindCgroupMountpoint("memory") + if err != nil { + return nil, err + } + + path := filepath.Join(mountpoint, cgroup) + + for _, arg := range memoryArgs { + if err := ioutil.WriteFile(filepath.Join(path, arg.File), []byte(arg.Value), 0700); err != nil { + return nil, err + } + } + } + + if len(cpusetArgs) != 0 { + // systemd does not atm set up the cpuset controller, so we must manually + // join it. Additionally that is a very finicky controller where each + // level must have a full setup as the default for a new directory is "no cpus", + // so we avoid using any hierarchies here, creating a toplevel directory. + mountpoint, err := cgroups.FindCgroupMountpoint("cpuset") + if err != nil { + return nil, err + } + initPath, err := cgroups.GetInitCgroupDir("cpuset") + if err != nil { + return nil, err + } + + rootPath := filepath.Join(mountpoint, initPath) + + path := filepath.Join(mountpoint, initPath, c.Parent+"-"+c.Name) + + res.cleanupDirs = append(res.cleanupDirs, path) + + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return nil, err + } + + foundCpus := false + foundMems := false + + for _, arg := range cpusetArgs { + if arg.File == "cpuset.cpus" { + foundCpus = true + } + if arg.File == "cpuset.mems" { + foundMems = true + } + if err := ioutil.WriteFile(filepath.Join(path, arg.File), []byte(arg.Value), 0700); err != nil { + return nil, err + } + } + + // These are required, if not specified inherit from parent + if !foundCpus { + s, err := ioutil.ReadFile(filepath.Join(rootPath, "cpuset.cpus")) + if err != nil { + return nil, err + } + + if err := ioutil.WriteFile(filepath.Join(path, "cpuset.cpus"), s, 0700); err != nil { + return nil, err + } + } + + // These are required, if not specified inherit from parent + if !foundMems { + s, err := ioutil.ReadFile(filepath.Join(rootPath, "cpuset.mems")) + if err != nil { + return nil, err + } + + if err := ioutil.WriteFile(filepath.Join(path, "cpuset.mems"), s, 0700); err != nil { + return nil, err + } + } + + if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), []byte(strconv.Itoa(pid)), 0700); err != nil { + return nil, err + } + } + + return &res, nil } func (c *systemdCgroup) Cleanup() error { - // systemd cleans up, we don't need to do anything + // systemd cleans up, we don't need to do much + + for _, path := range c.cleanupDirs { + os.RemoveAll(path) + } + return nil } From 412324cfbe9b5e256d9af31b21e6ae142d39612c Mon Sep 17 00:00:00 2001 From: Rohit Jnagal Date: Mon, 5 May 2014 18:12:25 +0000 Subject: [PATCH 194/219] Check supplied hostname before using it. 
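The init path previously passed whatever hostname was configured straight to sethostname; with this change an empty value is simply skipped. A minimal sketch of the guarded call, mirroring the hunk below:

    // only set the hostname when the config actually supplies one
    if container.Hostname != "" {
        if err := system.Sethostname(container.Hostname); err != nil {
            return fmt.Errorf("sethostname %s", err)
        }
    }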
Docker-DCO-1.1-Signed-off-by: Rohit Jnagal (github: rjnagal) --- pkg/libcontainer/nsinit/init.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index faec12af32..99974c6c35 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -65,8 +65,10 @@ func Init(container *libcontainer.Container, uncleanRootfs, consolePath string, if err := mount.InitializeMountNamespace(rootfs, consolePath, container); err != nil { return fmt.Errorf("setup mount namespace %s", err) } - if err := system.Sethostname(container.Hostname); err != nil { - return fmt.Errorf("sethostname %s", err) + if container.Hostname != "" { + if err := system.Sethostname(container.Hostname); err != nil { + return fmt.Errorf("sethostname %s", err) + } } if err := FinalizeNamespace(container); err != nil { return fmt.Errorf("finalize namespace %s", err) From db5f6b4aa0b34adbc9ba189a042e77e7bcdee681 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 5 May 2014 12:34:21 -0700 Subject: [PATCH 195/219] Improve libcontainer namespace and cap format Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- .../execdriver/native/configuration/parse.go | 29 +-- .../native/configuration/parse_test.go | 19 +- daemon/execdriver/native/create.go | 4 +- .../native/template/default_template.go | 47 ++-- pkg/libcontainer/container.go | 4 +- pkg/libcontainer/container.json | 207 +++++------------- pkg/libcontainer/container_test.go | 14 +- pkg/libcontainer/nsinit/exec.go | 10 +- pkg/libcontainer/nsinit/execin.go | 29 ++- pkg/libcontainer/nsinit/unsupported.go | 2 +- .../security/capabilities/capabilities.go | 11 +- pkg/libcontainer/types.go | 45 ++-- pkg/libcontainer/types_linux.go | 12 +- 13 files changed, 170 insertions(+), 263 deletions(-) diff --git a/daemon/execdriver/native/configuration/parse.go b/daemon/execdriver/native/configuration/parse.go index c3846af910..3bfc7d7e79 100644 --- a/daemon/execdriver/native/configuration/parse.go +++ b/daemon/execdriver/native/configuration/parse.go @@ -2,12 +2,13 @@ package configuration import ( "fmt" - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/utils" "os/exec" "path/filepath" "strconv" "strings" + + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/utils" ) type Action func(*libcontainer.Container, interface{}, string) error @@ -97,38 +98,22 @@ func memorySwap(container *libcontainer.Container, context interface{}, value st } func addCap(container *libcontainer.Container, context interface{}, value string) error { - c := container.CapabilitiesMask.Get(value) - if c == nil { - return fmt.Errorf("%s is not a valid capability", value) - } - c.Enabled = true + container.CapabilitiesMask[value] = true return nil } func dropCap(container *libcontainer.Container, context interface{}, value string) error { - c := container.CapabilitiesMask.Get(value) - if c == nil { - return fmt.Errorf("%s is not a valid capability", value) - } - c.Enabled = false + container.CapabilitiesMask[value] = false return nil } func addNamespace(container *libcontainer.Container, context interface{}, value string) error { - ns := container.Namespaces.Get(value) - if ns == nil { - return fmt.Errorf("%s is not a valid namespace", value[1:]) - } - ns.Enabled = true + container.Namespaces[value] = true return nil } func dropNamespace(container *libcontainer.Container, context interface{}, value string) error { - ns := 
container.Namespaces.Get(value) - if ns == nil { - return fmt.Errorf("%s is not a valid namespace", value[1:]) - } - ns.Enabled = false + container.Namespaces[value] = false return nil } diff --git a/daemon/execdriver/native/configuration/parse_test.go b/daemon/execdriver/native/configuration/parse_test.go index c28176f2ef..1b0316b688 100644 --- a/daemon/execdriver/native/configuration/parse_test.go +++ b/daemon/execdriver/native/configuration/parse_test.go @@ -1,8 +1,9 @@ package configuration import ( - "github.com/dotcloud/docker/daemon/execdriver/native/template" "testing" + + "github.com/dotcloud/docker/daemon/execdriver/native/template" ) func TestSetReadonlyRootFs(t *testing.T) { @@ -38,10 +39,10 @@ func TestConfigurationsDoNotConflict(t *testing.T) { t.Fatal(err) } - if !container1.CapabilitiesMask.Get("NET_ADMIN").Enabled { + if !container1.CapabilitiesMask["NET_ADMIN"] { t.Fatal("container one should have NET_ADMIN enabled") } - if container2.CapabilitiesMask.Get("NET_ADMIN").Enabled { + if container2.CapabilitiesMask["NET_ADMIN"] { t.Fatal("container two should not have NET_ADMIN enabled") } } @@ -137,10 +138,10 @@ func TestAddCap(t *testing.T) { t.Fatal(err) } - if !container.CapabilitiesMask.Get("MKNOD").Enabled { + if !container.CapabilitiesMask["MKNOD"] { t.Fatal("container should have MKNOD enabled") } - if !container.CapabilitiesMask.Get("SYS_ADMIN").Enabled { + if !container.CapabilitiesMask["SYS_ADMIN"] { t.Fatal("container should have SYS_ADMIN enabled") } } @@ -153,14 +154,14 @@ func TestDropCap(t *testing.T) { } ) // enabled all caps like in privileged mode - for _, c := range container.CapabilitiesMask { - c.Enabled = true + for key := range container.CapabilitiesMask { + container.CapabilitiesMask[key] = true } if err := ParseConfiguration(container, nil, opts); err != nil { t.Fatal(err) } - if container.CapabilitiesMask.Get("MKNOD").Enabled { + if container.CapabilitiesMask["MKNOD"] { t.Fatal("container should not have MKNOD enabled") } } @@ -176,7 +177,7 @@ func TestDropNamespace(t *testing.T) { t.Fatal(err) } - if container.Namespaces.Get("NEWNET").Enabled { + if container.Namespaces["NEWNET"] { t.Fatal("container should not have NEWNET enabled") } } diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index 5562d08986..12aa64c9df 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -79,8 +79,8 @@ func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver. 
} func (d *driver) setPrivileged(container *libcontainer.Container) error { - for _, c := range container.CapabilitiesMask { - c.Enabled = true + for key := range container.CapabilitiesMask { + container.CapabilitiesMask[key] = true } container.Cgroups.DeviceAccess = true diff --git a/daemon/execdriver/native/template/default_template.go b/daemon/execdriver/native/template/default_template.go index 5dbe21ecb0..249c5d5fe8 100644 --- a/daemon/execdriver/native/template/default_template.go +++ b/daemon/execdriver/native/template/default_template.go @@ -9,30 +9,30 @@ import ( // New returns the docker default configuration for libcontainer func New() *libcontainer.Container { container := &libcontainer.Container{ - CapabilitiesMask: libcontainer.Capabilities{ - libcontainer.GetCapability("SETPCAP"), - libcontainer.GetCapability("SYS_MODULE"), - libcontainer.GetCapability("SYS_RAWIO"), - libcontainer.GetCapability("SYS_PACCT"), - libcontainer.GetCapability("SYS_ADMIN"), - libcontainer.GetCapability("SYS_NICE"), - libcontainer.GetCapability("SYS_RESOURCE"), - libcontainer.GetCapability("SYS_TIME"), - libcontainer.GetCapability("SYS_TTY_CONFIG"), - libcontainer.GetCapability("AUDIT_WRITE"), - libcontainer.GetCapability("AUDIT_CONTROL"), - libcontainer.GetCapability("MAC_OVERRIDE"), - libcontainer.GetCapability("MAC_ADMIN"), - libcontainer.GetCapability("NET_ADMIN"), - libcontainer.GetCapability("MKNOD"), - libcontainer.GetCapability("SYSLOG"), + CapabilitiesMask: map[string]bool{ + "SETPCAP": false, + "SYS_MODULE": false, + "SYS_RAWIO": false, + "SYS_PACCT": false, + "SYS_ADMIN": false, + "SYS_NICE": false, + "SYS_RESOURCE": false, + "SYS_TIME": false, + "SYS_TTY_CONFIG": false, + "AUDIT_WRITE": false, + "AUDIT_CONTROL": false, + "MAC_OVERRIDE": false, + "MAC_ADMIN": false, + "NET_ADMIN": false, + "MKNOD": true, + "SYSLOG": false, }, - Namespaces: libcontainer.Namespaces{ - libcontainer.GetNamespace("NEWNS"), - libcontainer.GetNamespace("NEWUTS"), - libcontainer.GetNamespace("NEWIPC"), - libcontainer.GetNamespace("NEWPID"), - libcontainer.GetNamespace("NEWNET"), + Namespaces: map[string]bool{ + "NEWNS": true, + "NEWUTS": true, + "NEWIPC": true, + "NEWPID": true, + "NEWNET": true, }, Cgroups: &cgroups.Cgroup{ Parent: "docker", @@ -40,7 +40,6 @@ func New() *libcontainer.Container { }, Context: libcontainer.Context{}, } - container.CapabilitiesMask.Get("MKNOD").Enabled = true if apparmor.IsEnabled() { container.Context["apparmor_profile"] = "docker-default" } diff --git a/pkg/libcontainer/container.go b/pkg/libcontainer/container.go index ddcc6cab70..5acdff3d29 100644 --- a/pkg/libcontainer/container.go +++ b/pkg/libcontainer/container.go @@ -18,8 +18,8 @@ type Container struct { WorkingDir string `json:"working_dir,omitempty"` // current working directory Env []string `json:"environment,omitempty"` // environment to set Tty bool `json:"tty,omitempty"` // setup a proper tty or not - Namespaces Namespaces `json:"namespaces,omitempty"` // namespaces to apply - CapabilitiesMask Capabilities `json:"capabilities_mask,omitempty"` // capabilities to drop + Namespaces map[string]bool `json:"namespaces,omitempty"` // namespaces to apply + CapabilitiesMask map[string]bool `json:"capabilities_mask,omitempty"` // capabilities to drop Networks []*Network `json:"networks,omitempty"` // nil for host's network stack Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` // cgroups Context Context `json:"context,omitempty"` // generic context for specific options (apparmor, selinux) diff --git 
a/pkg/libcontainer/container.json b/pkg/libcontainer/container.json index 20c1121911..33d79600d4 100644 --- a/pkg/libcontainer/container.json +++ b/pkg/libcontainer/container.json @@ -1,151 +1,62 @@ { - "mounts" : [ - { - "type" : "devtmpfs" - } - ], - "tty" : true, - "environment" : [ - "HOME=/", - "PATH=PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin", - "container=docker", - "TERM=xterm-256color" - ], - "hostname" : "koye", - "cgroups" : { - "parent" : "docker", - "name" : "docker-koye" - }, - "capabilities_mask" : [ - { - "value" : 8, - "key" : "SETPCAP", - "enabled" : false + "namespaces": { + "NEWNET": true, + "NEWPID": true, + "NEWIPC": true, + "NEWUTS": true, + "NEWNS": true + }, + "networks": [ + { + "gateway": "localhost", + "type": "loopback", + "address": "127.0.0.1/0", + "mtu": 1500 + }, + { + "gateway": "172.17.42.1", + "context": { + "prefix": "veth", + "bridge": "docker0" }, - { - "enabled" : false, - "value" : 16, - "key" : "SYS_MODULE" - }, - { - "value" : 17, - "key" : "SYS_RAWIO", - "enabled" : false - }, - { - "key" : "SYS_PACCT", - "value" : 20, - "enabled" : false - }, - { - "value" : 21, - "key" : "SYS_ADMIN", - "enabled" : false - }, - { - "value" : 23, - "key" : "SYS_NICE", - "enabled" : false - }, - { - "value" : 24, - "key" : "SYS_RESOURCE", - "enabled" : false - }, - { - "key" : "SYS_TIME", - "value" : 25, - "enabled" : false - }, - { - "enabled" : false, - "value" : 26, - "key" : "SYS_TTY_CONFIG" - }, - { - "key" : "AUDIT_WRITE", - "value" : 29, - "enabled" : false - }, - { - "value" : 30, - "key" : "AUDIT_CONTROL", - "enabled" : false - }, - { - "enabled" : false, - "key" : "MAC_OVERRIDE", - "value" : 32 - }, - { - "enabled" : false, - "key" : "MAC_ADMIN", - "value" : 33 - }, - { - "key" : "NET_ADMIN", - "value" : 12, - "enabled" : false - }, - { - "value" : 27, - "key" : "MKNOD", - "enabled" : true - }, - { - "value" : 34, - "key" : "SYSLOG", - "enabled" : false - } - ], - "networks" : [ - { - "mtu" : 1500, - "address" : "127.0.0.1/0", - "type" : "loopback", - "gateway" : "localhost" - }, - { - "mtu" : 1500, - "address" : "172.17.42.2/16", - "type" : "veth", - "context" : { - "bridge" : "docker0", - "prefix" : "veth" - }, - "gateway" : "172.17.42.1" - } - ], - "namespaces" : [ - { - "key" : "NEWNS", - "value" : 131072, - "enabled" : true, - "file" : "mnt" - }, - { - "key" : "NEWUTS", - "value" : 67108864, - "enabled" : true, - "file" : "uts" - }, - { - "enabled" : true, - "file" : "ipc", - "key" : "NEWIPC", - "value" : 134217728 - }, - { - "file" : "pid", - "enabled" : true, - "value" : 536870912, - "key" : "NEWPID" - }, - { - "enabled" : true, - "file" : "net", - "key" : "NEWNET", - "value" : 1073741824 - } - ] + "type": "veth", + "address": "172.17.42.2/16", + "mtu": 1500 + } + ], + "capabilities_mask": { + "SYSLOG": false, + "MKNOD": true, + "NET_ADMIN": false, + "MAC_ADMIN": false, + "MAC_OVERRIDE": false, + "AUDIT_CONTROL": false, + "AUDIT_WRITE": false, + "SYS_TTY_CONFIG": false, + "SETPCAP": false, + "SYS_MODULE": false, + "SYS_RAWIO": false, + "SYS_PACCT": false, + "SYS_ADMIN": false, + "SYS_NICE": false, + "SYS_RESOURCE": false, + "SYS_TIME": false + }, + "cgroups": { + "name": "docker-koye", + "parent": "docker" + }, + "hostname": "koye", + "environment": [ + "HOME=/", + "PATH=PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin", + "container=docker", + "TERM=xterm-256color" + ], + "tty": true, + "mounts": [ + { + "type": "devtmpfs" + } + ] } diff --git a/pkg/libcontainer/container_test.go b/pkg/libcontainer/container_test.go index d710a6a53c..c02385af3f 100644 --- 
a/pkg/libcontainer/container_test.go +++ b/pkg/libcontainer/container_test.go @@ -15,7 +15,7 @@ func TestContainerJsonFormat(t *testing.T) { var container *Container if err := json.NewDecoder(f).Decode(&container); err != nil { - t.Fatal("failed to decode container config") + t.Fatalf("failed to decode container config: %s", err) } if container.Hostname != "koye" { t.Log("hostname is not set") @@ -27,32 +27,32 @@ func TestContainerJsonFormat(t *testing.T) { t.Fail() } - if !container.Namespaces.Contains("NEWNET") { + if !container.Namespaces["NEWNET"] { t.Log("namespaces should contain NEWNET") t.Fail() } - if container.Namespaces.Contains("NEWUSER") { + if container.Namespaces["NEWUSER"] { t.Log("namespaces should not contain NEWUSER") t.Fail() } - if !container.CapabilitiesMask.Contains("SYS_ADMIN") { + if _, exists := container.CapabilitiesMask["SYS_ADMIN"]; !exists { t.Log("capabilities mask should contain SYS_ADMIN") t.Fail() } - if container.CapabilitiesMask.Get("SYS_ADMIN").Enabled { + if container.CapabilitiesMask["SYS_ADMIN"] { t.Log("SYS_ADMIN should not be enabled in capabilities mask") t.Fail() } - if !container.CapabilitiesMask.Get("MKNOD").Enabled { + if !container.CapabilitiesMask["MKNOD"] { t.Log("MKNOD should be enabled in capabilities mask") t.Fail() } - if container.CapabilitiesMask.Contains("SYS_CHROOT") { + if container.CapabilitiesMask["SYS_CHROOT"] { t.Log("capabilities mask should not contain SYS_CHROOT") t.Fail() } diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index 8886efeb32..5d0d772a0f 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -159,10 +159,12 @@ func InitializeNetworking(container *libcontainer.Container, nspid int, pipe *Sy // GetNamespaceFlags parses the container's Namespaces options to set the correct // flags on clone, unshare, and setns -func GetNamespaceFlags(namespaces libcontainer.Namespaces) (flag int) { - for _, ns := range namespaces { - if ns.Enabled { - flag |= ns.Value +func GetNamespaceFlags(namespaces map[string]bool) (flag int) { + for key, enabled := range namespaces { + if enabled { + if ns := libcontainer.GetNamespace(key); ns != nil { + flag |= ns.Value + } } } return flag diff --git a/pkg/libcontainer/nsinit/execin.go b/pkg/libcontainer/nsinit/execin.go index 608437f855..40b95093dd 100644 --- a/pkg/libcontainer/nsinit/execin.go +++ b/pkg/libcontainer/nsinit/execin.go @@ -23,11 +23,13 @@ func ExecIn(container *libcontainer.Container, nspid int, args []string) (int, e return -1, err } - for _, nsv := range container.Namespaces { + for key, enabled := range container.Namespaces { // skip the PID namespace on unshare because it it not supported - if nsv.Enabled && nsv.Key != "NEWPID" { - if err := system.Unshare(nsv.Value); err != nil { - return -1, err + if enabled && key != "NEWPID" { + if ns := libcontainer.GetNamespace(key); ns != nil { + if err := system.Unshare(ns.Value); err != nil { + return -1, err + } } } } @@ -59,7 +61,7 @@ func ExecIn(container *libcontainer.Container, nspid int, args []string) (int, e // if the container has a new pid and mount namespace we need to // remount proc and sys to pick up the changes - if container.Namespaces.Contains("NEWNS") && container.Namespaces.Contains("NEWPID") { + if container.Namespaces["NEWNS"] && container.Namespaces["NEWPID"] { pid, err := system.Fork() if err != nil { return -1, err @@ -102,13 +104,18 @@ dropAndExec: } func getNsFds(pid int, container *libcontainer.Container) ([]uintptr, error) { - fds := 
make([]uintptr, len(container.Namespaces)) - for i, ns := range container.Namespaces { - f, err := os.OpenFile(filepath.Join("/proc/", strconv.Itoa(pid), "ns", ns.File), os.O_RDONLY, 0) - if err != nil { - return fds, err + fds := []uintptr{} + + for key, enabled := range container.Namespaces { + if enabled { + if ns := libcontainer.GetNamespace(key); ns != nil { + f, err := os.OpenFile(filepath.Join("/proc/", strconv.Itoa(pid), "ns", ns.File), os.O_RDONLY, 0) + if err != nil { + return fds, err + } + fds = append(fds, f.Fd()) + } } - fds[i] = f.Fd() } return fds, nil } diff --git a/pkg/libcontainer/nsinit/unsupported.go b/pkg/libcontainer/nsinit/unsupported.go index f213f2ec88..929b3dba5b 100644 --- a/pkg/libcontainer/nsinit/unsupported.go +++ b/pkg/libcontainer/nsinit/unsupported.go @@ -23,6 +23,6 @@ func SetupCgroups(container *libcontainer.Container, nspid int) (cgroups.ActiveC return nil, libcontainer.ErrUnsupported } -func GetNamespaceFlags(namespaces libcontainer.Namespaces) (flag int) { +func GetNamespaceFlags(namespaces map[string]bool) (flag int) { return 0 } diff --git a/pkg/libcontainer/security/capabilities/capabilities.go b/pkg/libcontainer/security/capabilities/capabilities.go index 4b81e708c7..ad13e672c7 100644 --- a/pkg/libcontainer/security/capabilities/capabilities.go +++ b/pkg/libcontainer/security/capabilities/capabilities.go @@ -1,9 +1,10 @@ package capabilities import ( + "os" + "github.com/dotcloud/docker/pkg/libcontainer" "github.com/syndtr/gocapability/capability" - "os" ) // DropCapabilities drops capabilities for the current process based @@ -26,9 +27,11 @@ func DropCapabilities(container *libcontainer.Container) error { // getCapabilitiesMask returns the specific cap mask values for the libcontainer types func getCapabilitiesMask(container *libcontainer.Container) []capability.Cap { drop := []capability.Cap{} - for _, c := range container.CapabilitiesMask { - if !c.Enabled { - drop = append(drop, c.Value) + for key, enabled := range container.CapabilitiesMask { + if !enabled { + if c := libcontainer.GetCapability(key); c != nil { + drop = append(drop, c.Value) + } } } return drop diff --git a/pkg/libcontainer/types.go b/pkg/libcontainer/types.go index f5fe6cffa9..8f056c817d 100644 --- a/pkg/libcontainer/types.go +++ b/pkg/libcontainer/types.go @@ -2,6 +2,7 @@ package libcontainer import ( "errors" + "github.com/syndtr/gocapability/capability" ) @@ -38,31 +39,30 @@ var ( namespaceList = Namespaces{} capabilityList = Capabilities{ - {Key: "SETPCAP", Value: capability.CAP_SETPCAP, Enabled: false}, - {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE, Enabled: false}, - {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO, Enabled: false}, - {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT, Enabled: false}, - {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN, Enabled: false}, - {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE, Enabled: false}, - {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE, Enabled: false}, - {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME, Enabled: false}, - {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG, Enabled: false}, - {Key: "MKNOD", Value: capability.CAP_MKNOD, Enabled: false}, - {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE, Enabled: false}, - {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL, Enabled: false}, - {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE, Enabled: false}, - {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN, Enabled: false}, - {Key: "NET_ADMIN", Value: 
capability.CAP_NET_ADMIN, Enabled: false}, - {Key: "SYSLOG", Value: capability.CAP_SYSLOG, Enabled: false}, + {Key: "SETPCAP", Value: capability.CAP_SETPCAP}, + {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE}, + {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO}, + {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT}, + {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN}, + {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE}, + {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE}, + {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME}, + {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG}, + {Key: "MKNOD", Value: capability.CAP_MKNOD}, + {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE}, + {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL}, + {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE}, + {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN}, + {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN}, + {Key: "SYSLOG", Value: capability.CAP_SYSLOG}, } ) type ( Namespace struct { - Key string `json:"key,omitempty"` - Enabled bool `json:"enabled,omitempty"` - Value int `json:"value,omitempty"` - File string `json:"file,omitempty"` + Key string `json:"key,omitempty"` + Value int `json:"value,omitempty"` + File string `json:"file,omitempty"` } Namespaces []*Namespace ) @@ -98,9 +98,8 @@ func (n Namespaces) Get(ns string) *Namespace { type ( Capability struct { - Key string `json:"key,omitempty"` - Enabled bool `json:"enabled"` - Value capability.Cap `json:"value,omitempty"` + Key string `json:"key,omitempty"` + Value capability.Cap `json:"value,omitempty"` } Capabilities []*Capability ) diff --git a/pkg/libcontainer/types_linux.go b/pkg/libcontainer/types_linux.go index 1f937e0c97..c14531df20 100644 --- a/pkg/libcontainer/types_linux.go +++ b/pkg/libcontainer/types_linux.go @@ -6,11 +6,11 @@ import ( func init() { namespaceList = Namespaces{ - {Key: "NEWNS", Value: syscall.CLONE_NEWNS, File: "mnt", Enabled: true}, - {Key: "NEWUTS", Value: syscall.CLONE_NEWUTS, File: "uts", Enabled: true}, - {Key: "NEWIPC", Value: syscall.CLONE_NEWIPC, File: "ipc", Enabled: true}, - {Key: "NEWUSER", Value: syscall.CLONE_NEWUSER, File: "user", Enabled: true}, - {Key: "NEWPID", Value: syscall.CLONE_NEWPID, File: "pid", Enabled: true}, - {Key: "NEWNET", Value: syscall.CLONE_NEWNET, File: "net", Enabled: true}, + {Key: "NEWNS", Value: syscall.CLONE_NEWNS, File: "mnt"}, + {Key: "NEWUTS", Value: syscall.CLONE_NEWUTS, File: "uts"}, + {Key: "NEWIPC", Value: syscall.CLONE_NEWIPC, File: "ipc"}, + {Key: "NEWUSER", Value: syscall.CLONE_NEWUSER, File: "user"}, + {Key: "NEWPID", Value: syscall.CLONE_NEWPID, File: "pid"}, + {Key: "NEWNET", Value: syscall.CLONE_NEWNET, File: "net"}, } } From b25208ee3e4d652d8b1f86ffac343b718118b745 Mon Sep 17 00:00:00 2001 From: Kevin Menard Date: Mon, 5 May 2014 15:43:11 -0400 Subject: [PATCH 196/219] It's its. --- docs/sources/introduction/working-with-docker.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/introduction/working-with-docker.md b/docs/sources/introduction/working-with-docker.md index d6bdb2260d..8d946e6846 100644 --- a/docs/sources/introduction/working-with-docker.md +++ b/docs/sources/introduction/working-with-docker.md @@ -293,7 +293,7 @@ A `#` sign is used to provide a comment: > **Tip:** The `Dockerfile` is very flexible and provides a powerful set > of instructions for building applications. 
To learn more about the -> `Dockerfile` and it's instructions see the [Dockerfile +> `Dockerfile` and its instructions see the [Dockerfile > Reference](http://docs.docker.io/reference/builder/). ### First steps with the Dockerfile From 01fec73ba4cff45ac08c0330ea0d67aff70ebf8e Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 5 May 2014 13:54:37 -0700 Subject: [PATCH 197/219] Update after namespace refactor Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/execdriver/native/create.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go index 51cc2ddfd3..a7b3d9a107 100644 --- a/daemon/execdriver/native/create.go +++ b/daemon/execdriver/native/create.go @@ -54,7 +54,7 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.Command) error { if c.Network.HostNetworking { - container.Namespaces.Get("NEWNET").Enabled = false + container.Namespaces["NEWNET"] = false return nil } container.Networks = []*libcontainer.Network{ From 4994b0fe5459996691dde18155a7b712762e8e53 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 5 May 2014 14:45:14 -0700 Subject: [PATCH 198/219] Move envconfig generation to lxc driver Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/container.go | 36 +-------------------------------- daemon/execdriver/lxc/driver.go | 15 ++++++++++++++ daemon/volumes.go | 13 ++++-------- 3 files changed, 20 insertions(+), 44 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index 123eca0263..7313804326 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -168,19 +168,6 @@ func (container *Container) WriteHostConfig() (err error) { return ioutil.WriteFile(container.hostConfigPath(), data, 0666) } -func (container *Container) generateEnvConfig(env []string) error { - data, err := json.Marshal(env) - if err != nil { - return err - } - p, err := container.EnvConfigPath() - if err != nil { - return err - } - ioutil.WriteFile(p, data, 0600) - return nil -} - func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { var cStdout, cStderr io.ReadCloser @@ -422,15 +409,10 @@ func (container *Container) Start() (err error) { if err != nil { return err } - env := container.createDaemonEnvironment(linkedEnv) - // TODO: This is only needed for lxc so we should look for a way to - // remove this dep - if err := container.generateEnvConfig(env); err != nil { - return err - } if err := container.setupWorkingDirectory(); err != nil { return err } + env := container.createDaemonEnvironment(linkedEnv) if err := populateCommand(container, env); err != nil { return err } @@ -851,22 +833,6 @@ func (container *Container) jsonPath() string { return path.Join(container.root, "config.json") } -func (container *Container) EnvConfigPath() (string, error) { - p := path.Join(container.root, "config.env") - if _, err := os.Stat(p); err != nil { - if os.IsNotExist(err) { - f, err := os.Create(p) - if err != nil { - return "", err - } - f.Close() - } else { - return "", err - } - } - return p, nil -} - // This method must be exported to be used from the lxc template // This directory is only usable when the container is running func (container *Container) RootfsPath() string { diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index 
2c06211c0d..d787d8d873 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -1,6 +1,7 @@ package lxc import ( + "encoding/json" "fmt" "io/ioutil" "log" @@ -85,6 +86,9 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba if err := execdriver.SetTerminal(c, pipes); err != nil { return -1, err } + if err := d.generateEnvConfig(c); err != nil { + return -1, err + } configPath, err := d.generateLXCConfig(c) if err != nil { return -1, err @@ -416,3 +420,14 @@ func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) { } return root, nil } + +func (d *driver) generateEnvConfig(c *execdriver.Command) error { + data, err := json.Marshal(c.Env) + if err != nil { + return err + } + p := path.Join(d.root, "containers", c.ID, "config.env") + c.Mounts = append(c.Mounts, execdriver.Mount{p, "/.dockerenv", false, true}) + + return ioutil.WriteFile(p, data, 0600) +} diff --git a/daemon/volumes.go b/daemon/volumes.go index a6570845bf..a15e3084b2 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -2,14 +2,15 @@ package daemon import ( "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/daemon/execdriver" - "github.com/dotcloud/docker/utils" "io/ioutil" "os" "path/filepath" "strings" "syscall" + + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/daemon/execdriver" + "github.com/dotcloud/docker/utils" ) type BindMap struct { @@ -34,14 +35,8 @@ func prepareVolumesForContainer(container *Container) error { } func setupMountsForContainer(container *Container) error { - envPath, err := container.EnvConfigPath() - if err != nil { - return err - } - mounts := []execdriver.Mount{ {container.daemon.sysInitPath, "/.dockerinit", false, true}, - {envPath, "/.dockerenv", false, true}, {container.ResolvConfPath, "/etc/resolv.conf", false, true}, } From a1a029f6d7f980960135c1346a1e9f7830692fac Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 5 May 2014 22:05:14 +0000 Subject: [PATCH 199/219] add etchosts Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- daemon/container.go | 24 ++------ pkg/networkfs/MAINTAINERS | 1 + pkg/networkfs/etchosts/etchosts.go | 34 ++++++++++++ pkg/networkfs/etchosts/etchosts_test.go | 74 +++++++++++++++++++++++++ 4 files changed, 114 insertions(+), 19 deletions(-) create mode 100644 pkg/networkfs/MAINTAINERS create mode 100644 pkg/networkfs/etchosts/etchosts.go create mode 100644 pkg/networkfs/etchosts/etchosts_test.go diff --git a/daemon/container.go b/daemon/container.go index 123eca0263..76bab3719b 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -22,6 +22,7 @@ import ( "github.com/dotcloud/docker/links" "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/pkg/label" + "github.com/dotcloud/docker/pkg/networkfs/etchosts" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" ) @@ -513,26 +514,11 @@ func (container *Container) buildHostname() { } } -func (container *Container) buildHostnameAndHostsFiles(IP string) { +func (container *Container) buildHostnameAndHostsFiles(IP string) error { container.buildHostname() - hostsContent := []byte(` -127.0.0.1 localhost -::1 localhost ip6-localhost ip6-loopback -fe00::0 ip6-localnet -ff00::0 ip6-mcastprefix -ff02::1 ip6-allnodes -ff02::2 ip6-allrouters -`) - container.HostsPath = path.Join(container.root, "hosts") - - if container.Config.Domainname != "" { - hostsContent = append([]byte(fmt.Sprintf("%s\t%s.%s %s\n", IP, container.Config.Hostname, 
container.Config.Domainname, container.Config.Hostname)), hostsContent...) - } else if !container.Config.NetworkDisabled { - hostsContent = append([]byte(fmt.Sprintf("%s\t%s\n", IP, container.Config.Hostname)), hostsContent...) - } - ioutil.WriteFile(container.HostsPath, hostsContent, 0644) + return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname) } func (container *Container) allocateNetwork() error { @@ -1072,12 +1058,12 @@ func (container *Container) initializeNetworking() error { container.Config.Domainname = nc.Config.Domainname } else if container.daemon.config.DisableNetwork { container.Config.NetworkDisabled = true - container.buildHostnameAndHostsFiles("127.0.1.1") + return container.buildHostnameAndHostsFiles("127.0.1.1") } else { if err := container.allocateNetwork(); err != nil { return err } - container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress) + return container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress) } return nil } diff --git a/pkg/networkfs/MAINTAINERS b/pkg/networkfs/MAINTAINERS new file mode 100644 index 0000000000..ceeb0cfd18 --- /dev/null +++ b/pkg/networkfs/MAINTAINERS @@ -0,0 +1 @@ +Victor Vieux (@vieux) diff --git a/pkg/networkfs/etchosts/etchosts.go b/pkg/networkfs/etchosts/etchosts.go new file mode 100644 index 0000000000..169797071a --- /dev/null +++ b/pkg/networkfs/etchosts/etchosts.go @@ -0,0 +1,34 @@ +package etchosts + +import ( + "bytes" + "fmt" + "io/ioutil" +) + +var defaultContent = map[string]string{ + "localhost": "127.0.0.1", + "localhost ip6-localhost ip6-loopback": "::1", + "ip6-localnet": "fe00::0", + "ip6-mcastprefix": "ff00::0", + "ip6-allnodes": "ff02::1", + "ip6-allrouters": "ff02::2", +} + +func Build(path, IP, hostname, domainname string) error { + content := bytes.NewBuffer(nil) + if IP != "" { + if domainname != "" { + content.WriteString(fmt.Sprintf("%s\t%s.%s %s\n", IP, hostname, domainname, hostname)) + } else { + content.WriteString(fmt.Sprintf("%s\t%s\n", IP, hostname)) + } + } + + for hosts, ip := range defaultContent { + if _, err := content.WriteString(fmt.Sprintf("%s\t%s\n", ip, hosts)); err != nil { + return err + } + } + return ioutil.WriteFile(path, content.Bytes(), 0644) +} diff --git a/pkg/networkfs/etchosts/etchosts_test.go b/pkg/networkfs/etchosts/etchosts_test.go new file mode 100644 index 0000000000..da5662d64f --- /dev/null +++ b/pkg/networkfs/etchosts/etchosts_test.go @@ -0,0 +1,74 @@ +package etchosts + +import ( + "bytes" + "io/ioutil" + "os" + "testing" +) + +func TestBuildHostnameDomainname(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), "10.11.12.13", "testhostname", "testdomainname") + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "10.11.12.13\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} + +func TestBuildHostname(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), "10.11.12.13", "testhostname", "") + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "10.11.12.13\ttesthostname\n"; !bytes.Contains(content, []byte(expected)) { + 
t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} + +func TestBuildNoIP(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), "", "testhostname", "") + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := ""; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} From 3744452ecf46351758478e99795e20b186a1bee4 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 5 May 2014 22:51:32 +0000 Subject: [PATCH 200/219] add resolvconf Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- daemon/container.go | 23 +--- daemon/daemon.go | 3 +- daemon/networkdriver/bridge/driver.go | 14 ++- pkg/networkfs/resolvconf/resolvconf.go | 87 +++++++++++++ pkg/networkfs/resolvconf/resolvconf_test.go | 133 ++++++++++++++++++++ utils/utils.go | 54 +------- utils/utils_test.go | 103 --------------- 7 files changed, 237 insertions(+), 180 deletions(-) create mode 100644 pkg/networkfs/resolvconf/resolvconf.go create mode 100644 pkg/networkfs/resolvconf/resolvconf_test.go diff --git a/daemon/container.go b/daemon/container.go index 76bab3719b..68d2d2a62a 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -23,6 +23,7 @@ import ( "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/networkfs/etchosts" + "github.com/dotcloud/docker/pkg/networkfs/resolvconf" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" ) @@ -987,7 +988,7 @@ func (container *Container) setupContainerDns() error { return nil } - resolvConf, err := utils.GetResolvConf() + resolvConf, err := resolvconf.Get() if err != nil { return err } @@ -995,8 +996,8 @@ func (container *Container) setupContainerDns() error { // If custom dns exists, then create a resolv.conf for the container if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 { var ( - dns = utils.GetNameservers(resolvConf) - dnsSearch = utils.GetSearchDomains(resolvConf) + dns = resolvconf.GetNameservers(resolvConf) + dnsSearch = resolvconf.GetSearchDomains(resolvConf) ) if len(config.Dns) > 0 { dns = config.Dns @@ -1009,21 +1010,7 @@ func (container *Container) setupContainerDns() error { dnsSearch = daemon.config.DnsSearch } container.ResolvConfPath = path.Join(container.root, "resolv.conf") - f, err := os.Create(container.ResolvConfPath) - if err != nil { - return err - } - defer f.Close() - for _, dns := range dns { - if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil { - return err - } - } - if len(dnsSearch) > 0 { - if _, err := f.Write([]byte("search " + strings.Join(dnsSearch, " ") + "\n")); err != nil { - return err - } - } + return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch) } else { container.ResolvConfPath = "/etc/resolv.conf" } diff --git a/daemon/daemon.go b/daemon/daemon.go index 22182f389f..00b6d9eee2 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -29,6 +29,7 @@ import ( "github.com/dotcloud/docker/pkg/graphdb" "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/mount" + "github.com/dotcloud/docker/pkg/networkfs/resolvconf" "github.com/dotcloud/docker/pkg/selinux" "github.com/dotcloud/docker/pkg/sysinfo" "github.com/dotcloud/docker/runconfig" @@ -981,7 +982,7 @@ func (daemon *Daemon) SetServer(server 
Server) { } func (daemon *Daemon) checkLocaldns() error { - resolvConf, err := utils.GetResolvConf() + resolvConf, err := resolvconf.Get() if err != nil { return err } diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index 4828e3b9cf..c64aa423d1 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -2,6 +2,11 @@ package bridge import ( "fmt" + "io/ioutil" + "log" + "net" + "strings" + "github.com/dotcloud/docker/daemon/networkdriver" "github.com/dotcloud/docker/daemon/networkdriver/ipallocator" "github.com/dotcloud/docker/daemon/networkdriver/portallocator" @@ -9,11 +14,8 @@ import ( "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/iptables" "github.com/dotcloud/docker/pkg/netlink" + "github.com/dotcloud/docker/pkg/networkfs/resolvconf" "github.com/dotcloud/docker/utils" - "io/ioutil" - "log" - "net" - "strings" ) const ( @@ -222,13 +224,13 @@ func setupIPTables(addr net.Addr, icc bool) error { // If it can't find an address which doesn't conflict, it will return an error. func createBridge(bridgeIP string) error { nameservers := []string{} - resolvConf, _ := utils.GetResolvConf() + resolvConf, _ := resolvconf.Get() // we don't check for an error here, because we don't really care // if we can't read /etc/resolv.conf. So instead we skip the append // if resolvConf is nil. It either doesn't exist, or we can't read it // for some reason. if resolvConf != nil { - nameservers = append(nameservers, utils.GetNameserversAsCIDR(resolvConf)...) + nameservers = append(nameservers, resolvconf.GetNameserversAsCIDR(resolvConf)...) } var ifaceAddr string diff --git a/pkg/networkfs/resolvconf/resolvconf.go b/pkg/networkfs/resolvconf/resolvconf.go new file mode 100644 index 0000000000..d6854fb3b1 --- /dev/null +++ b/pkg/networkfs/resolvconf/resolvconf.go @@ -0,0 +1,87 @@ +package resolvconf + +import ( + "bytes" + "io/ioutil" + "regexp" + "strings" +) + +func Get() ([]byte, error) { + resolv, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + return nil, err + } + return resolv, nil +} + +// getLines parses input into lines and strips away comments. 
+func getLines(input []byte, commentMarker []byte) [][]byte { + lines := bytes.Split(input, []byte("\n")) + var output [][]byte + for _, currentLine := range lines { + var commentIndex = bytes.Index(currentLine, commentMarker) + if commentIndex == -1 { + output = append(output, currentLine) + } else { + output = append(output, currentLine[:commentIndex]) + } + } + return output +} + +// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf +func GetNameservers(resolvConf []byte) []string { + nameservers := []string{} + re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`) + for _, line := range getLines(resolvConf, []byte("#")) { + var ns = re.FindSubmatch(line) + if len(ns) > 0 { + nameservers = append(nameservers, string(ns[1])) + } + } + return nameservers +} + +// GetNameserversAsCIDR returns nameservers (if any) listed in +// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") +// This function's output is intended for net.ParseCIDR +func GetNameserversAsCIDR(resolvConf []byte) []string { + nameservers := []string{} + for _, nameserver := range GetNameservers(resolvConf) { + nameservers = append(nameservers, nameserver+"/32") + } + return nameservers +} + +// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf +// If more than one search line is encountered, only the contents of the last +// one is returned. +func GetSearchDomains(resolvConf []byte) []string { + re := regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`) + domains := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + match := re.FindSubmatch(line) + if match == nil { + continue + } + domains = strings.Fields(string(match[1])) + } + return domains +} + +func Build(path string, dns, dnsSearch []string) error { + content := bytes.NewBuffer(nil) + for _, dns := range dns { + if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil { + return err + } + } + if len(dnsSearch) > 0 { + if _, err := content.WriteString("search " + strings.Join(dnsSearch, " ") + "\n"); err != nil { + return err + } + } + + return ioutil.WriteFile(path, content.Bytes(), 0644) +} diff --git a/pkg/networkfs/resolvconf/resolvconf_test.go b/pkg/networkfs/resolvconf/resolvconf_test.go new file mode 100644 index 0000000000..fd20712376 --- /dev/null +++ b/pkg/networkfs/resolvconf/resolvconf_test.go @@ -0,0 +1,133 @@ +package resolvconf + +import ( + "bytes" + "io/ioutil" + "os" + "testing" +) + +func TestGet(t *testing.T) { + resolvConfUtils, err := Get() + if err != nil { + t.Fatal(err) + } + resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + t.Fatal(err) + } + if string(resolvConfUtils) != string(resolvConfSystem) { + t.Fatalf("/etc/resolv.conf and GetResolvConf have different content.") + } +} + +func TestGetNameservers(t *testing.T) { + for resolv, result := range map[string][]string{` +nameserver 1.2.3.4 +nameserver 40.3.200.10 +search example.com`: {"1.2.3.4", "40.3.200.10"}, + `search example.com`: {}, + `nameserver 1.2.3.4 +search example.com +nameserver 4.30.20.100`: {"1.2.3.4", "4.30.20.100"}, + ``: {}, + ` nameserver 1.2.3.4 `: {"1.2.3.4"}, + `search example.com +nameserver 1.2.3.4 +#nameserver 4.3.2.1`: {"1.2.3.4"}, + `search example.com +nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4"}, + } { + test := GetNameservers([]byte(resolv)) + if !strSlicesEqual(test, result) { + t.Fatalf("Wrong nameserver string {%s} should be %v. 
Input: %s", test, result, resolv) + } + } +} + +func TestGetNameserversAsCIDR(t *testing.T) { + for resolv, result := range map[string][]string{` +nameserver 1.2.3.4 +nameserver 40.3.200.10 +search example.com`: {"1.2.3.4/32", "40.3.200.10/32"}, + `search example.com`: {}, + `nameserver 1.2.3.4 +search example.com +nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"}, + ``: {}, + ` nameserver 1.2.3.4 `: {"1.2.3.4/32"}, + `search example.com +nameserver 1.2.3.4 +#nameserver 4.3.2.1`: {"1.2.3.4/32"}, + `search example.com +nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"}, + } { + test := GetNameserversAsCIDR([]byte(resolv)) + if !strSlicesEqual(test, result) { + t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv) + } + } +} + +func TestGetSearchDomains(t *testing.T) { + for resolv, result := range map[string][]string{ + `search example.com`: {"example.com"}, + `search example.com # ignored`: {"example.com"}, + ` search example.com `: {"example.com"}, + ` search example.com # ignored`: {"example.com"}, + `search foo.example.com example.com`: {"foo.example.com", "example.com"}, + ` search foo.example.com example.com `: {"foo.example.com", "example.com"}, + ` search foo.example.com example.com # ignored`: {"foo.example.com", "example.com"}, + ``: {}, + `# ignored`: {}, + `nameserver 1.2.3.4 +search foo.example.com example.com`: {"foo.example.com", "example.com"}, + `nameserver 1.2.3.4 +search dup1.example.com dup2.example.com +search foo.example.com example.com`: {"foo.example.com", "example.com"}, + `nameserver 1.2.3.4 +search foo.example.com example.com +nameserver 4.30.20.100`: {"foo.example.com", "example.com"}, + } { + test := GetSearchDomains([]byte(resolv)) + if !strSlicesEqual(test, result) { + t.Fatalf("Wrong search domain string {%s} should be %v. Input: %s", test, result, resolv) + } + } +} + +func strSlicesEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for i, v := range a { + if v != b[i] { + return false + } + } + + return true +} + +func TestBuild(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + + err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"search1"}) + if err != nil { + t.Fatal(err) + } + + content, err := ioutil.ReadFile(file.Name()) + if err != nil { + t.Fatal(err) + } + + if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\nsearch search1\n"; !bytes.Contains(content, []byte(expected)) { + t.Fatalf("Expected to find '%s' got '%s'", expected, content) + } +} diff --git a/utils/utils.go b/utils/utils.go index 066cfbac5a..4ef44b5617 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -9,7 +9,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/dotcloud/docker/dockerversion" "index/suffixarray" "io" "io/ioutil" @@ -23,6 +22,8 @@ import ( "strings" "sync" "time" + + "github.com/dotcloud/docker/dockerversion" ) type KeyValuePair struct { @@ -779,17 +780,6 @@ func IsGIT(str string) bool { return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") || strings.HasPrefix(str, "git@github.com:") || (strings.HasSuffix(str, ".git") && IsURL(str)) } -// GetResolvConf opens and read the content of /etc/resolv.conf. -// It returns it as byte slice. 
-func GetResolvConf() ([]byte, error) { - resolv, err := ioutil.ReadFile("/etc/resolv.conf") - if err != nil { - Errorf("Error openning resolv.conf: %s", err) - return nil, err - } - return resolv, nil -} - // CheckLocalDns looks into the /etc/resolv.conf, // it returns true if there is a local nameserver or if there is no nameserver. func CheckLocalDns(resolvConf []byte) bool { @@ -825,46 +815,6 @@ func GetLines(input []byte, commentMarker []byte) [][]byte { return output } -// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf -func GetNameservers(resolvConf []byte) []string { - nameservers := []string{} - re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`) - for _, line := range GetLines(resolvConf, []byte("#")) { - var ns = re.FindSubmatch(line) - if len(ns) > 0 { - nameservers = append(nameservers, string(ns[1])) - } - } - return nameservers -} - -// GetNameserversAsCIDR returns nameservers (if any) listed in -// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") -// This function's output is intended for net.ParseCIDR -func GetNameserversAsCIDR(resolvConf []byte) []string { - nameservers := []string{} - for _, nameserver := range GetNameservers(resolvConf) { - nameservers = append(nameservers, nameserver+"/32") - } - return nameservers -} - -// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf -// If more than one search line is encountered, only the contents of the last -// one is returned. -func GetSearchDomains(resolvConf []byte) []string { - re := regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`) - domains := []string{} - for _, line := range GetLines(resolvConf, []byte("#")) { - match := re.FindSubmatch(line) - if match == nil { - continue - } - domains = strings.Fields(string(match[1])) - } - return domains -} - // FIXME: Change this not to receive default value as parameter func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) { var ( diff --git a/utils/utils_test.go b/utils/utils_test.go index 501ae67c2c..ccd212202c 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -377,20 +377,6 @@ func TestParseRepositoryTag(t *testing.T) { } } -func TestGetResolvConf(t *testing.T) { - resolvConfUtils, err := GetResolvConf() - if err != nil { - t.Fatal(err) - } - resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") - if err != nil { - t.Fatal(err) - } - if string(resolvConfUtils) != string(resolvConfSystem) { - t.Fatalf("/etc/resolv.conf and GetResolvConf have different content.") - } -} - func TestCheckLocalDns(t *testing.T) { for resolv, result := range map[string]bool{`# Dynamic nameserver 10.0.2.3 @@ -464,95 +450,6 @@ func TestParsePortMapping(t *testing.T) { } } -func TestGetNameservers(t *testing.T) { - for resolv, result := range map[string][]string{` -nameserver 1.2.3.4 -nameserver 40.3.200.10 -search example.com`: {"1.2.3.4", "40.3.200.10"}, - `search example.com`: {}, - `nameserver 1.2.3.4 -search example.com -nameserver 4.30.20.100`: {"1.2.3.4", "4.30.20.100"}, - ``: {}, - ` nameserver 1.2.3.4 `: {"1.2.3.4"}, - `search example.com -nameserver 1.2.3.4 -#nameserver 4.3.2.1`: {"1.2.3.4"}, - `search example.com -nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4"}, - } { - test := GetNameservers([]byte(resolv)) - if !StrSlicesEqual(test, result) { - t.Fatalf("Wrong nameserver string {%s} should be %v. 
Input: %s", test, result, resolv) - } - } -} - -func TestGetNameserversAsCIDR(t *testing.T) { - for resolv, result := range map[string][]string{` -nameserver 1.2.3.4 -nameserver 40.3.200.10 -search example.com`: {"1.2.3.4/32", "40.3.200.10/32"}, - `search example.com`: {}, - `nameserver 1.2.3.4 -search example.com -nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"}, - ``: {}, - ` nameserver 1.2.3.4 `: {"1.2.3.4/32"}, - `search example.com -nameserver 1.2.3.4 -#nameserver 4.3.2.1`: {"1.2.3.4/32"}, - `search example.com -nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"}, - } { - test := GetNameserversAsCIDR([]byte(resolv)) - if !StrSlicesEqual(test, result) { - t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv) - } - } -} - -func TestGetSearchDomains(t *testing.T) { - for resolv, result := range map[string][]string{ - `search example.com`: {"example.com"}, - `search example.com # ignored`: {"example.com"}, - ` search example.com `: {"example.com"}, - ` search example.com # ignored`: {"example.com"}, - `search foo.example.com example.com`: {"foo.example.com", "example.com"}, - ` search foo.example.com example.com `: {"foo.example.com", "example.com"}, - ` search foo.example.com example.com # ignored`: {"foo.example.com", "example.com"}, - ``: {}, - `# ignored`: {}, - `nameserver 1.2.3.4 -search foo.example.com example.com`: {"foo.example.com", "example.com"}, - `nameserver 1.2.3.4 -search dup1.example.com dup2.example.com -search foo.example.com example.com`: {"foo.example.com", "example.com"}, - `nameserver 1.2.3.4 -search foo.example.com example.com -nameserver 4.30.20.100`: {"foo.example.com", "example.com"}, - } { - test := GetSearchDomains([]byte(resolv)) - if !StrSlicesEqual(test, result) { - t.Fatalf("Wrong search domain string {%s} should be %v. 
Input: %s", test, result, resolv) - } - } -} - -func StrSlicesEqual(a, b []string) bool { - if len(a) != len(b) { - return false - } - - for i, v := range a { - if v != b[i] { - return false - } - } - - return true -} - func TestReplaceAndAppendEnvVars(t *testing.T) { var ( d = []string{"HOME=/"} From 55f3e72d7f6b996c0874d402c95f4b8c9a7d80d9 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 5 May 2014 23:23:14 +0000 Subject: [PATCH 201/219] propagate errors write Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- daemon/container.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/daemon/container.go b/daemon/container.go index 2a17ff1ece..20a320307b 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -487,18 +487,18 @@ func (container *Container) StderrLogPipe() io.ReadCloser { return utils.NewBufReader(reader) } -func (container *Container) buildHostname() { +func (container *Container) buildHostnameFile() error { container.HostnamePath = path.Join(container.root, "hostname") - if container.Config.Domainname != "" { - ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644) - } else { - ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) + return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644) } + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) } func (container *Container) buildHostnameAndHostsFiles(IP string) error { - container.buildHostname() + if err := container.buildHostnameFile(); err != nil { + return err + } container.HostsPath = path.Join(container.root, "hosts") return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname) @@ -998,7 +998,7 @@ func (container *Container) initializeNetworking() error { } container.HostsPath = "/etc/hosts" - container.buildHostname() + return container.buildHostnameFile() } else if container.hostConfig.NetworkMode.IsContainer() { // we need to get the hosts files from the container to join nc, err := container.getNetworkedContainer() From 41cfaa738c2d8583ecca50948c9df5eda3dfd7f1 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 5 May 2014 16:48:56 -0700 Subject: [PATCH 202/219] Move Attach from container to daemon This moves the Attach method from the container to the daemon. This method mostly supports the http attach logic and does not have anything to do with the running of a container. 
Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/attach.go | 153 ++++++++++++++++++++++++++++++++++++++++++++ daemon/container.go | 144 ----------------------------------------- server/buildfile.go | 16 ++--- server/server.go | 2 +- 4 files changed, 162 insertions(+), 153 deletions(-) create mode 100644 daemon/attach.go diff --git a/daemon/attach.go b/daemon/attach.go new file mode 100644 index 0000000000..0e3b8b8a9d --- /dev/null +++ b/daemon/attach.go @@ -0,0 +1,153 @@ +package daemon + +import ( + "io" + + "github.com/dotcloud/docker/utils" +) + +func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { + var ( + cStdout, cStderr io.ReadCloser + nJobs int + errors = make(chan error, 3) + ) + + if stdin != nil && container.Config.OpenStdin { + nJobs += 1 + if cStdin, err := container.StdinPipe(); err != nil { + errors <- err + } else { + go func() { + utils.Debugf("attach: stdin: begin") + defer utils.Debugf("attach: stdin: end") + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if container.Config.StdinOnce && !container.Config.Tty { + defer cStdin.Close() + } else { + defer func() { + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + }() + } + if container.Config.Tty { + _, err = utils.CopyEscapable(cStdin, stdin) + } else { + _, err = io.Copy(cStdin, stdin) + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + utils.Errorf("attach: stdin: %s", err) + } + errors <- err + }() + } + } + if stdout != nil { + nJobs += 1 + if p, err := container.StdoutPipe(); err != nil { + errors <- err + } else { + cStdout = p + go func() { + utils.Debugf("attach: stdout: begin") + defer utils.Debugf("attach: stdout: end") + // If we are in StdinOnce mode, then close stdin + if container.Config.StdinOnce && stdin != nil { + defer stdin.Close() + } + if stdinCloser != nil { + defer stdinCloser.Close() + } + _, err := io.Copy(stdout, cStdout) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + utils.Errorf("attach: stdout: %s", err) + } + errors <- err + }() + } + } else { + go func() { + if stdinCloser != nil { + defer stdinCloser.Close() + } + if cStdout, err := container.StdoutPipe(); err != nil { + utils.Errorf("attach: stdout pipe: %s", err) + } else { + io.Copy(&utils.NopWriter{}, cStdout) + } + }() + } + if stderr != nil { + nJobs += 1 + if p, err := container.StderrPipe(); err != nil { + errors <- err + } else { + cStderr = p + go func() { + utils.Debugf("attach: stderr: begin") + defer utils.Debugf("attach: stderr: end") + // If we are in StdinOnce mode, then close stdin + if container.Config.StdinOnce && stdin != nil { + defer stdin.Close() + } + if stdinCloser != nil { + defer stdinCloser.Close() + } + _, err := io.Copy(stderr, cStderr) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + utils.Errorf("attach: stderr: %s", err) + } + errors <- err + }() + } + } else { + go func() { + if stdinCloser != nil { + defer stdinCloser.Close() + } + + if cStderr, err := container.StderrPipe(); err != nil { + utils.Errorf("attach: stdout pipe: %s", err) + } else { + io.Copy(&utils.NopWriter{}, cStderr) + } + }() + } + + return utils.Go(func() error { + defer func() { + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + }() + + // FIXME: how to clean up the stdin goroutine without the unwanted side effect + // of closing the passed 
stdin? Add an intermediary io.Pipe? + for i := 0; i < nJobs; i += 1 { + utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs) + if err := <-errors; err != nil { + utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err) + return err + } + utils.Debugf("attach: job %d completed successfully", i+1) + } + utils.Debugf("attach: all jobs completed successfully") + return nil + }) +} diff --git a/daemon/container.go b/daemon/container.go index 20a320307b..f4cc125ca4 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -170,150 +170,6 @@ func (container *Container) WriteHostConfig() (err error) { return ioutil.WriteFile(container.hostConfigPath(), data, 0666) } -func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { - var cStdout, cStderr io.ReadCloser - - var nJobs int - errors := make(chan error, 3) - if stdin != nil && container.Config.OpenStdin { - nJobs += 1 - if cStdin, err := container.StdinPipe(); err != nil { - errors <- err - } else { - go func() { - utils.Debugf("attach: stdin: begin") - defer utils.Debugf("attach: stdin: end") - // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr - if container.Config.StdinOnce && !container.Config.Tty { - defer cStdin.Close() - } else { - defer func() { - if cStdout != nil { - cStdout.Close() - } - if cStderr != nil { - cStderr.Close() - } - }() - } - if container.Config.Tty { - _, err = utils.CopyEscapable(cStdin, stdin) - } else { - _, err = io.Copy(cStdin, stdin) - } - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - utils.Errorf("attach: stdin: %s", err) - } - errors <- err - }() - } - } - if stdout != nil { - nJobs += 1 - if p, err := container.StdoutPipe(); err != nil { - errors <- err - } else { - cStdout = p - go func() { - utils.Debugf("attach: stdout: begin") - defer utils.Debugf("attach: stdout: end") - // If we are in StdinOnce mode, then close stdin - if container.Config.StdinOnce && stdin != nil { - defer stdin.Close() - } - if stdinCloser != nil { - defer stdinCloser.Close() - } - _, err := io.Copy(stdout, cStdout) - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - utils.Errorf("attach: stdout: %s", err) - } - errors <- err - }() - } - } else { - go func() { - if stdinCloser != nil { - defer stdinCloser.Close() - } - if cStdout, err := container.StdoutPipe(); err != nil { - utils.Errorf("attach: stdout pipe: %s", err) - } else { - io.Copy(&utils.NopWriter{}, cStdout) - } - }() - } - if stderr != nil { - nJobs += 1 - if p, err := container.StderrPipe(); err != nil { - errors <- err - } else { - cStderr = p - go func() { - utils.Debugf("attach: stderr: begin") - defer utils.Debugf("attach: stderr: end") - // If we are in StdinOnce mode, then close stdin - if container.Config.StdinOnce && stdin != nil { - defer stdin.Close() - } - if stdinCloser != nil { - defer stdinCloser.Close() - } - _, err := io.Copy(stderr, cStderr) - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - utils.Errorf("attach: stderr: %s", err) - } - errors <- err - }() - } - } else { - go func() { - if stdinCloser != nil { - defer stdinCloser.Close() - } - - if cStderr, err := container.StderrPipe(); err != nil { - utils.Errorf("attach: stdout pipe: %s", err) - } else { - io.Copy(&utils.NopWriter{}, cStderr) - } - }() - } - - return utils.Go(func() error { - defer func() { - if cStdout != nil { - cStdout.Close() - } - if cStderr != nil { - cStderr.Close() - } - }() - - // FIXME: how to 
clean up the stdin goroutine without the unwanted side effect - // of closing the passed stdin? Add an intermediary io.Pipe? - for i := 0; i < nJobs; i += 1 { - utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs) - if err := <-errors; err != nil { - utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err) - return err - } - utils.Debugf("attach: job %d completed successfully", i+1) - } - utils.Debugf("attach: all jobs completed successfully") - return nil - }) -} - func populateCommand(c *Container, env []string) error { var ( en *execdriver.Network diff --git a/server/buildfile.go b/server/buildfile.go index 8466f4290e..24b0b58f25 100644 --- a/server/buildfile.go +++ b/server/buildfile.go @@ -6,12 +6,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/daemon" - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/registry" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" "io" "io/ioutil" "net/url" @@ -22,6 +16,13 @@ import ( "regexp" "sort" "strings" + + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/daemon" + "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/registry" + "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/utils" ) var ( @@ -644,10 +645,9 @@ func (b *buildFile) create() (*daemon.Container, error) { func (b *buildFile) run(c *daemon.Container) error { var errCh chan error - if b.verbose { errCh = utils.Go(func() error { - return <-c.Attach(nil, nil, b.outStream, b.errStream) + return <-b.daemon.Attach(c, nil, nil, b.outStream, b.errStream) }) } diff --git a/server/server.go b/server/server.go index 04cc17a35a..47565f0022 100644 --- a/server/server.go +++ b/server/server.go @@ -2369,7 +2369,7 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status { cStderr = job.Stderr } - <-container.Attach(cStdin, cStdinCloser, cStdout, cStderr) + <-srv.daemon.Attach(container, cStdin, cStdinCloser, cStdout, cStderr) // If we are in stdinonce mode, wait for the process to end // otherwise, simply return From 2b0f88383afba28fe7b0bba989d115c2f5e2cc87 Mon Sep 17 00:00:00 2001 From: Kevin Menard Date: Mon, 5 May 2014 20:08:35 -0400 Subject: [PATCH 203/219] Use the correct "it's." --- docs/sources/use/working_with_volumes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/use/working_with_volumes.md b/docs/sources/use/working_with_volumes.md index c403532bcc..7d6136b85a 100644 --- a/docs/sources/use/working_with_volumes.md +++ b/docs/sources/use/working_with_volumes.md @@ -59,7 +59,7 @@ more new volumes to any container created from that image: ### Creating and mounting a Data Volume Container If you have some persistent data that you want to share between -containers, or want to use from non-persistent containers, its best to +containers, or want to use from non-persistent containers, it's best to create a named Data Volume Container, and then to mount the data from it. 
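A note on the Attach code moved into daemon/attach.go above: each stream copy runs in its own goroutine, every goroutine reports its outcome on a single buffered error channel, and the caller drains exactly nJobs results, aborting on the first failure. A minimal, self-contained sketch of that fan-in pattern (the runJobs helper and its sample jobs are hypothetical, not part of the patch):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // runJobs mirrors the fan-in used by daemon.Attach: every job reports its
    // result on one buffered channel, and the caller waits for exactly
    // len(jobs) results, returning the first error it sees.
    func runJobs(jobs []func() error) error {
    	errs := make(chan error, len(jobs))
    	for _, job := range jobs {
    		go func(j func() error) {
    			errs <- j()
    		}(job)
    	}
    	for i := 0; i < len(jobs); i++ {
    		if err := <-errs; err != nil {
    			return err
    		}
    	}
    	return nil
    }

    func main() {
    	err := runJobs([]func() error{
    		func() error { return nil },                          // e.g. the stdout copy
    		func() error { return errors.New("stderr: closed") }, // e.g. a failed stderr copy
    	})
    	fmt.Println(err) // prints the stderr error
    }

Buffering the channel to the number of jobs keeps the copy goroutines from blocking on send even when the caller has already returned after an earlier error.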
From cd818950919f0da868b36e32033607d5e6c98466 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 5 May 2014 10:47:55 -0700 Subject: [PATCH 204/219] Add alex as devmapper and btrfs maintainer Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/graphdriver/btrfs/MAINTAINERS | 1 + daemon/graphdriver/devmapper/MAINTAINERS | 1 + 2 files changed, 2 insertions(+) create mode 100644 daemon/graphdriver/btrfs/MAINTAINERS create mode 100644 daemon/graphdriver/devmapper/MAINTAINERS diff --git a/daemon/graphdriver/btrfs/MAINTAINERS b/daemon/graphdriver/btrfs/MAINTAINERS new file mode 100644 index 0000000000..9e629d5fcc --- /dev/null +++ b/daemon/graphdriver/btrfs/MAINTAINERS @@ -0,0 +1 @@ +Alexander Larsson (@alexlarsson) diff --git a/daemon/graphdriver/devmapper/MAINTAINERS b/daemon/graphdriver/devmapper/MAINTAINERS new file mode 100644 index 0000000000..9e629d5fcc --- /dev/null +++ b/daemon/graphdriver/devmapper/MAINTAINERS @@ -0,0 +1 @@ +Alexander Larsson (@alexlarsson) From 53f38a14cd6b61a6b5df68cc3694dcba2b0c1eb7 Mon Sep 17 00:00:00 2001 From: Bryan Murphy Date: Mon, 7 Apr 2014 18:34:07 +0000 Subject: [PATCH 205/219] add linked containers to hosts file Docker-DCO-1.1-Signed-off-by: Bryan Murphy (github: bmurphy1976) Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) Tested-by: Solomon Hykes (github: shykes) --- daemon/container.go | 15 ++++++- docs/sources/reference/run.md | 9 +++- docs/sources/use/working_with_links_names.md | 29 +++++++++++++ integration-cli/docker_cli_links_test.go | 45 ++++++++++++++++++++ pkg/networkfs/etchosts/etchosts.go | 11 ++++- 5 files changed, 106 insertions(+), 3 deletions(-) create mode 100644 integration-cli/docker_cli_links_test.go diff --git a/daemon/container.go b/daemon/container.go index f4cc125ca4..7b6b65494e 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -357,7 +357,20 @@ func (container *Container) buildHostnameAndHostsFiles(IP string) error { } container.HostsPath = path.Join(container.root, "hosts") - return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname) + + extraContent := make(map[string]string) + + children, err := container.daemon.Children(container.Name) + if err != nil { + return err + } + + for linkAlias, child := range children { + _, alias := path.Split(linkAlias) + extraContent[alias] = child.NetworkSettings.IPAddress + } + + return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname, &extraContent) } func (container *Container) allocateNetwork() error { diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index 521e8010e2..b6cb0a08fe 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -1,4 +1,4 @@ -page_title: Docker Run Reference +page_title: Docker Run Reference page_description: Configure containers at runtime page_keywords: docker, run, configure, runtime @@ -407,6 +407,13 @@ And we can use that information to connect from another container as a client: $ docker run -i -t --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT' 172.17.0.32:6379> +Docker will also map the private IP address to the alias of a linked +container by inserting an entry into `/etc/hosts`. 
You can use this +mechanism to communicate with a linked container by its alias: + + $ docker run -d --name servicename busybox sleep 30 + $ docker run -i -t --link servicename:servicealias busybox ping -c 1 servicealias + ## VOLUME (Shared Filesystems) -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. diff --git a/docs/sources/use/working_with_links_names.md b/docs/sources/use/working_with_links_names.md index dab66cef06..6951e3c26f 100644 --- a/docs/sources/use/working_with_links_names.md +++ b/docs/sources/use/working_with_links_names.md @@ -109,3 +109,32 @@ the Redis container. CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db + +## Resolving Links by Name + +New in version v0.11. + +Linked containers can be accessed by hostname. Hostnames are mapped by +appending entries to '/etc/hosts' using the linked container's alias. + +For example, linking a container using '--link redis:db' will generate the +following '/etc/hosts' file: + + root@6541a75d44a0:/# cat /etc/hosts + 172.17.0.3 6541a75d44a0 + 172.17.0.2 db + + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00::0 ip6-localnet + ff00::0 ip6-mcastprefix + ff02::1 ip6-allnodes + ff02::2 ip6-allrouters + root@6541a75d44a0:/# + +Using this mechanism, you can communicate with the linked container by +name: + + root@6541a75d44a0:/# echo PING | redis-cli -h db + PONG + root@6541a75d44a0:/# diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go new file mode 100644 index 0000000000..5b43b3f8a9 --- /dev/null +++ b/integration-cli/docker_cli_links_test.go @@ -0,0 +1,45 @@ +package main + +import ( + "fmt" + "os/exec" + "testing" +) + +func TestPingUnlinkedContainers(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + exitCode, err := runCommand(runCmd) + + if exitCode == 0 { + t.Fatal("run ping did not fail") + } else if exitCode != 1 { + errorOut(err, t, fmt.Sprintf("run ping failed with errors: %v", err)) + } +} + +func TestPingLinkedContainers(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-d", "--name", "container1", "busybox", "sleep", "10") + out, _, err := runCommandWithOutput(cmd) + errorOut(err, t, fmt.Sprintf("run container1 failed with errors: %v", err)) + idA := stripTrailingCharacters(out) + + cmd = exec.Command(dockerBinary, "run", "-d", "--name", "container2", "busybox", "sleep", "10") + out, _, err = runCommandWithOutput(cmd) + errorOut(err, t, fmt.Sprintf("run container2 failed with errors: %v", err)) + idB := stripTrailingCharacters(out) + + cmd = exec.Command(dockerBinary, "run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + out, _, err = runCommandWithOutput(cmd) + fmt.Printf("OUT: %s", out) + errorOut(err, t, fmt.Sprintf("run ping failed with errors: %v", err)) + + cmd = exec.Command(dockerBinary, "kill", idA) + _, err = runCommand(cmd) + errorOut(err, t, fmt.Sprintf("failed to kill container1: %v", err)) + + cmd = exec.Command(dockerBinary, "kill", idB) + _, err = runCommand(cmd) + errorOut(err, t, fmt.Sprintf("failed to kill container2: %v", err)) + + deleteAllContainers() +} diff --git a/pkg/networkfs/etchosts/etchosts.go b/pkg/networkfs/etchosts/etchosts.go 
index 169797071a..144a039bff 100644 --- a/pkg/networkfs/etchosts/etchosts.go +++ b/pkg/networkfs/etchosts/etchosts.go @@ -15,7 +15,7 @@ var defaultContent = map[string]string{ "ip6-allrouters": "ff02::2", } -func Build(path, IP, hostname, domainname string) error { +func Build(path, IP, hostname, domainname string, extraContent *map[string]string) error { content := bytes.NewBuffer(nil) if IP != "" { if domainname != "" { @@ -30,5 +30,14 @@ func Build(path, IP, hostname, domainname string) error { return err } } + + if extraContent != nil { + for hosts, ip := range *extraContent { + if _, err := content.WriteString(fmt.Sprintf("%s\t%s\n", ip, hosts)); err != nil { + return err + } + } + } + return ioutil.WriteFile(path, content.Bytes(), 0644) } From dc605c8be76760951d0d12e67409602c7b4b7973 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Mon, 5 May 2014 19:51:03 -0700 Subject: [PATCH 206/219] Simplify integration test for link + hostname. Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- integration-cli/docker_cli_links_test.go | 27 ++++++------------------ 1 file changed, 6 insertions(+), 21 deletions(-) diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go index 5b43b3f8a9..a159d1c799 100644 --- a/integration-cli/docker_cli_links_test.go +++ b/integration-cli/docker_cli_links_test.go @@ -18,28 +18,13 @@ func TestPingUnlinkedContainers(t *testing.T) { } func TestPingLinkedContainers(t *testing.T) { - cmd := exec.Command(dockerBinary, "run", "-d", "--name", "container1", "busybox", "sleep", "10") - out, _, err := runCommandWithOutput(cmd) - errorOut(err, t, fmt.Sprintf("run container1 failed with errors: %v", err)) + var out string + out, _, _ = cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") idA := stripTrailingCharacters(out) - - cmd = exec.Command(dockerBinary, "run", "-d", "--name", "container2", "busybox", "sleep", "10") - out, _, err = runCommandWithOutput(cmd) - errorOut(err, t, fmt.Sprintf("run container2 failed with errors: %v", err)) + out, _, _ = cmd("run", "-d", "--name", "container2", "busybox", "sleep", "10") idB := stripTrailingCharacters(out) - - cmd = exec.Command(dockerBinary, "run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") - out, _, err = runCommandWithOutput(cmd) - fmt.Printf("OUT: %s", out) - errorOut(err, t, fmt.Sprintf("run ping failed with errors: %v", err)) - - cmd = exec.Command(dockerBinary, "kill", idA) - _, err = runCommand(cmd) - errorOut(err, t, fmt.Sprintf("failed to kill container1: %v", err)) - - cmd = exec.Command(dockerBinary, "kill", idB) - _, err = runCommand(cmd) - errorOut(err, t, fmt.Sprintf("failed to kill container2: %v", err)) - + cmd("run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + cmd("kill", idA) + cmd("kill", idB) deleteAllContainers() } From 9eeff6d099a951c3a3e45d63ce2f8cb158aaeb6c Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 6 May 2014 20:26:44 +1000 Subject: [PATCH 207/219] Update the run --net cli help to include the 'host' option and then add that to the run and cli docs Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/reference/commandline/cli.md | 32 +++++++++++------------ docs/sources/reference/run.md | 6 ++--- runconfig/parse.go | 2 +- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git 
a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 1bbc3585fd..8936bbe332 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -809,33 +809,33 @@ Run a command in a new container Usage: docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] - -a, --attach=map[]: Attach to stdin, stdout or stderr + -a, --attach=[]: Attach to stdin, stdout or stderr. -c, --cpu-shares=0: CPU shares (relative weight) --cidfile="": Write the container ID to the file -d, --detach=false: Detached mode: Run container in the background, print new container id + --dns=[]: Set custom dns servers + --dns-search=[]: Set custom dns search domains -e, --env=[]: Set environment variables - --env-file="": Read in a line delimited file of ENV variables + --entrypoint="": Overwrite the default entrypoint of the image + --env-file=[]: Read in a line delimited file of ENV variables + --expose=[]: Expose a port from the container without publishing it to your host -h, --hostname="": Container host name -i, --interactive=false: Keep stdin open even if not attached - --privileged=false: Give extended privileges to this container + --link=[]: Add link to another container (name:alias) + --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -m, --memory="": Memory limit (format: , where unit = b, k, m or g) - -n, --networking=true: Enable networking for this container - -p, --publish=[]: Map a network port to the container + --name="": Assign a name to the container + --net="bridge": Set the Network mode for the container ('bridge': creates a new network stack for the container on the docker bridge, 'none': no networking for this container, 'container:': reuses another container network stack), 'host': use the host network stack inside the container + -P, --publish-all=false: Publish all exposed ports to the host interfaces + -p, --publish=[]: Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping) + --privileged=false: Give extended privileges to this container --rm=false: Automatically remove the container when it exits (incompatible with -d) + --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) -t, --tty=false: Allocate a pseudo-tty -u, --user="": Username or UID - --dns=[]: Set custom dns servers for the container - --dns-search=[]: Set custom DNS search domains for the container - -v, --volume=[]: Create a bind mount to a directory or file with: [host-path]:[container-path]:[rw|ro]. If a directory "container-path" is missing, then docker creates a new volume. - --volumes-from="": Mount all volumes from the given container(s) - --entrypoint="": Overwrite the default entrypoint set by the image + -v, --volume=[]: Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container) + --volumes-from=[]: Mount volumes from the specified container(s) -w, --workdir="": Working directory inside the container - --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" - --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) - --expose=[]: Expose a port from the container without publishing it to your host - --link="": Add link to another container (name:alias) - --name="": Assign the specified name to the container. 
If no name is specific docker will generate a random name - -P, --publish-all=false: Publish all exposed ports to the host interfaces The `docker run` command first `creates` a writeable container layer over the specified image, and then `starts` it using the specified command. That is, diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index b6cb0a08fe..b3415330fe 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -136,12 +136,12 @@ PID files): ## Network Settings - --dns=[] : Set custom dns servers for the container - --net=bridge : Set the network mode + --dns=[] : Set custom dns servers for the container + --net="bridge": Set the Network mode for the container ('bridge': creates a new network stack for the container on the docker bridge, 'none': no networking for this container, 'container:': reuses another container network stack), 'host': use the host network stack inside the container By default, all containers have networking enabled and they can make any outgoing connections. The operator can completely disable networking -with `docker run -n` which disables all incoming and +with `docker run --net none` which disables all incoming and outgoing networking. In cases like this, you would perform I/O through files or STDIN/STDOUT only. diff --git a/runconfig/parse.go b/runconfig/parse.go index 0d511ef2ec..74b7801532 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -62,7 +62,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") - flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container ('bridge': creates a new network stack for the container on the docker bridge, 'none': no networking for this container, 'container:': reuses another container network stack)") + flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container ('bridge': creates a new network stack for the container on the docker bridge, 'none': no networking for this container, 'container:': reuses another container network stack), 'host': use the host network stack inside the container") // For documentation purpose _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") _ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container") From 14f65ab83b4f72ea56b3e98023e941474d4e9dd8 Mon Sep 17 00:00:00 2001 From: cyphar Date: Wed, 7 May 2014 00:42:22 +1000 Subject: [PATCH 208/219] pkg: networkfs: etchosts: fixed tests This patch fixes the fact that the tests for pkg/networkfs/etchosts couldn't build due to syntax errors. 
Docker-DCO-1.1-Signed-off-by: Aleksa Sarai (github: cyphar) --- pkg/networkfs/etchosts/etchosts_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/networkfs/etchosts/etchosts_test.go b/pkg/networkfs/etchosts/etchosts_test.go index da5662d64f..44406c81b8 100644 --- a/pkg/networkfs/etchosts/etchosts_test.go +++ b/pkg/networkfs/etchosts/etchosts_test.go @@ -14,7 +14,7 @@ func TestBuildHostnameDomainname(t *testing.T) { } defer os.Remove(file.Name()) - err = Build(file.Name(), "10.11.12.13", "testhostname", "testdomainname") + err = Build(file.Name(), "10.11.12.13", "testhostname", "testdomainname", nil) if err != nil { t.Fatal(err) } @@ -36,7 +36,7 @@ func TestBuildHostname(t *testing.T) { } defer os.Remove(file.Name()) - err = Build(file.Name(), "10.11.12.13", "testhostname", "") + err = Build(file.Name(), "10.11.12.13", "testhostname", "", nil) if err != nil { t.Fatal(err) } @@ -58,7 +58,7 @@ func TestBuildNoIP(t *testing.T) { } defer os.Remove(file.Name()) - err = Build(file.Name(), "", "testhostname", "") + err = Build(file.Name(), "", "testhostname", "", nil) if err != nil { t.Fatal(err) } From 924979259ec4c9ef6beab0468325f1cb04deaacb Mon Sep 17 00:00:00 2001 From: cyphar Date: Wed, 7 May 2014 01:05:15 +1000 Subject: [PATCH 209/219] integration-cli: docker_cli_links: fixed broken tests The tests weren't ... tested when last edited; this patch fixes them so that they run and pass correctly. Docker-DCO-1.1-Signed-off-by: Aleksa Sarai (github: cyphar) --- integration-cli/docker_cli_links_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go index a159d1c799..55c41e0bbc 100644 --- a/integration-cli/docker_cli_links_test.go +++ b/integration-cli/docker_cli_links_test.go @@ -21,10 +21,10 @@ func TestPingLinkedContainers(t *testing.T) { var out string out, _, _ = cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") idA := stripTrailingCharacters(out) - out, _, _ = cmd("run", "-d", "--name", "container2", "busybox", "sleep", "10") + out, _, _ = cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") idB := stripTrailingCharacters(out) - cmd("run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") - cmd("kill", idA) - cmd("kill", idB) + cmd(t, "run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + cmd(t, "kill", idA) + cmd(t, "kill", idB) deleteAllContainers() } From 69d43b2674aa8ed69c641556cae68d405505a45b Mon Sep 17 00:00:00 2001 From: Victor Marmol Date: Tue, 6 May 2014 15:53:38 +0000 Subject: [PATCH 210/219] Remove support for MemoryReservation in systemd systems. This has been deprecated since systemd 208. 
Docker-DCO-1.1-Signed-off-by: Victor Marmol (github: vmarmol) --- pkg/cgroups/systemd/apply_systemd.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pkg/cgroups/systemd/apply_systemd.go b/pkg/cgroups/systemd/apply_systemd.go index 12dede9581..c4b0937b63 100644 --- a/pkg/cgroups/systemd/apply_systemd.go +++ b/pkg/cgroups/systemd/apply_systemd.go @@ -146,11 +146,7 @@ func Apply(c *cgroups.Cgroup, pid int) (cgroups.ActiveCgroup, error) { properties = append(properties, systemd1.Property{"MemoryLimit", dbus.MakeVariant(uint64(c.Memory))}) } - if c.MemoryReservation != 0 { - properties = append(properties, - systemd1.Property{"MemorySoftLimit", dbus.MakeVariant(uint64(c.MemoryReservation))}) - } - // TODO: MemorySwap not available in systemd + // TODO: MemoryReservation and MemorySwap not available in systemd if c.CpuShares != 0 { properties = append(properties, From 543e60eb60fed2734c10953216003325beddd536 Mon Sep 17 00:00:00 2001 From: Victor Marmol Date: Mon, 5 May 2014 23:56:53 +0000 Subject: [PATCH 211/219] Export cpuacct CPU usage in total cores over the sampled period. Docker-DCO-1.1-Signed-off-by: Victor Marmol (github: vmarmol) --- pkg/cgroups/fs/cpuacct.go | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/pkg/cgroups/fs/cpuacct.go b/pkg/cgroups/fs/cpuacct.go index 4ea2b1f51b..892b5ab6b1 100644 --- a/pkg/cgroups/fs/cpuacct.go +++ b/pkg/cgroups/fs/cpuacct.go @@ -36,9 +36,9 @@ func (s *cpuacctGroup) Remove(d *data) error { func (s *cpuacctGroup) Stats(d *data) (map[string]float64, error) { var ( - startCpu, lastCpu, startSystem, lastSystem float64 - percentage float64 - paramData = make(map[string]float64) + startCpu, lastCpu, startSystem, lastSystem, startUsage, lastUsage float64 + percentage float64 + paramData = make(map[string]float64) ) path, err := d.path("cpuacct") if startCpu, err = s.getCpuUsage(d, path); err != nil { @@ -47,6 +47,10 @@ func (s *cpuacctGroup) Stats(d *data) (map[string]float64, error) { if startSystem, err = s.getSystemCpuUsage(d); err != nil { return nil, err } + startUsageTime := time.Now() + if startUsage, err = getCgroupParamFloat64(path, "cpuacct.usage"); err != nil { + return nil, err + } // sample for 100ms time.Sleep(100 * time.Millisecond) if lastCpu, err = s.getCpuUsage(d, path); err != nil { @@ -55,10 +59,15 @@ func (s *cpuacctGroup) Stats(d *data) (map[string]float64, error) { if lastSystem, err = s.getSystemCpuUsage(d); err != nil { return nil, err } + usageSampleDuration := time.Since(startUsageTime) + if lastUsage, err = getCgroupParamFloat64(path, "cpuacct.usage"); err != nil { + return nil, err + } var ( deltaProc = lastCpu - startCpu deltaSystem = lastSystem - startSystem + deltaUsage = lastUsage - startUsage ) if deltaSystem > 0.0 { percentage = ((deltaProc / deltaSystem) * clockTicks) * cpuCount @@ -66,6 +75,9 @@ func (s *cpuacctGroup) Stats(d *data) (map[string]float64, error) { // NOTE: a percentage over 100% is valid for POSIX because that means the // processes is using multiple cores paramData["percentage"] = percentage + + // Delta usage is in nanoseconds of CPU time so get the usage (in cores) over the sample time. 
+ paramData["usage"] = deltaUsage / float64(usageSampleDuration.Nanoseconds()) return paramData, nil } From 8d07c2d1aeb3326f1f62854e6adfd26f0d8e0342 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Tue, 6 May 2014 11:39:11 -0700 Subject: [PATCH 212/219] Fix logo in README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1922be5d8a..fae1bb916b 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ It benefits directly from the experience accumulated over several years of large-scale operation and support of hundreds of thousands of applications and databases. -![Docker L](docs/theme/docker/static/img/dockerlogo-h.png "Docker") +![Docker L](docs/theme/mkdocs/img/logo_compressed.png "Docker") ## Better than VMs From 77098d5b5bf8840a1179380b34aedb26139b9d65 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Tue, 6 May 2014 17:43:46 +0000 Subject: [PATCH 213/219] use tabwriter to display usage in mflag Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- pkg/mflag/example/example.go | 13 ++++++++----- pkg/mflag/flag.go | 17 ++++++++++++++--- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/pkg/mflag/example/example.go b/pkg/mflag/example/example.go index ce9dd30e4c..2d78baa172 100644 --- a/pkg/mflag/example/example.go +++ b/pkg/mflag/example/example.go @@ -2,6 +2,7 @@ package main import ( "fmt" + flag "github.com/dotcloud/docker/pkg/mflag" ) @@ -19,15 +20,17 @@ func init() { flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer") flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help") + flag.StringVar(&str, []string{"mode"}, "mode1", "set the mode\nmode1: use the mode1\nmode2: use the mode2\nmode3: use the mode3") flag.Parse() } func main() { if h { flag.PrintDefaults() + } else { + fmt.Printf("s/#hidden/-string: %s\n", str) + fmt.Printf("b: %b\n", b) + fmt.Printf("-bool: %b\n", b2) + fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String()) + fmt.Printf("ARGS: %v\n", flag.Args()) } - fmt.Printf("s/#hidden/-string: %s\n", str) - fmt.Printf("b: %b\n", b) - fmt.Printf("-bool: %b\n", b2) - fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String()) - fmt.Printf("ARGS: %v\n", flag.Args()) } diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go index ed6fad3b46..ed85a4a4c5 100644 --- a/pkg/mflag/flag.go +++ b/pkg/mflag/flag.go @@ -83,6 +83,7 @@ import ( "sort" "strconv" "strings" + "text/tabwriter" "time" ) @@ -419,11 +420,12 @@ func Set(name, value string) error { // PrintDefaults prints, to standard error unless configured // otherwise, the default values of all defined flags in the set. 
func (f *FlagSet) PrintDefaults() { + writer := tabwriter.NewWriter(f.out(), 20, 1, 3, ' ', 0) f.VisitAll(func(flag *Flag) { - format := " -%s=%s: %s\n" + format := " -%s=%s" if _, ok := flag.Value.(*stringValue); ok { // put quotes on the value - format = " -%s=%q: %s\n" + format = " -%s=%q" } names := []string{} for _, name := range flag.Names { @@ -432,9 +434,18 @@ func (f *FlagSet) PrintDefaults() { } } if len(names) > 0 { - fmt.Fprintf(f.out(), format, strings.Join(names, ", -"), flag.DefValue, flag.Usage) + fmt.Fprintf(writer, format, strings.Join(names, ", -"), flag.DefValue) + for i, line := range strings.Split(flag.Usage, "\n") { + if i != 0 { + line = " " + line + } + fmt.Fprintln(writer, "\t", line) + } + // start := fmt.Sprintf(format, strings.Join(names, ", -"), flag.DefValue) + // fmt.Fprintln(f.out(), start, strings.Replace(flag.Usage, "\n", "\n"+strings.Repeat(" ", len(start)+1), -1)) } }) + writer.Flush() } // PrintDefaults prints to standard error the default values of all defined command-line flags. From b622da3cfe211d31df69e72a93ed4fae872aca65 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Tue, 6 May 2014 17:51:20 +0000 Subject: [PATCH 214/219] improve some usages Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- docker/docker.go | 8 ++++---- runconfig/parse.go | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docker/docker.go b/docker/docker.go index 26ccd24bb4..60f34a1f14 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -43,11 +43,11 @@ func main() { flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode") flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode") flAutoRestart = flag.Bool([]string{"r", "-restart"}, true, "Restart previously running containers") - bridgeName = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge; use 'none' to disable container networking") + bridgeName = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking") bridgeIp = flag.String([]string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b") pidfile = flag.String([]string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file") flRoot = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the docker runtime") - flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group") + flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group") flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") flDns = opts.NewListOpts(opts.ValidateIp4Address) flDnsSearch = opts.NewListOpts(opts.ValidateDomain) @@ -58,7 +58,7 @@ func main() { flGraphDriver = flag.String([]string{"s", "-storage-driver"}, "", "Force the docker runtime to use a specific storage driver") flExecDriver = flag.String([]string{"e", "-exec-driver"}, "native", "Force the docker runtime to use a specific exec driver") flHosts = opts.NewListOpts(api.ValidateHost) - flMtu = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU; if no value is provided: default to the default route 
MTU or 1500 if no default route is available") + flMtu = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available") flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags") flTlsVerify = flag.Bool([]string{"-tlsverify"}, false, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)") flCa = flag.String([]string{"-tlscacert"}, dockerConfDir+defaultCaFile, "Trust only remotes providing a certificate signed by the CA given here") @@ -68,7 +68,7 @@ func main() { ) flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers") flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") - flag.Var(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.") + flag.Var(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode\nspecified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.") flag.Parse() diff --git a/runconfig/parse.go b/runconfig/parse.go index 74b7801532..9142b175af 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -62,7 +62,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") - flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container ('bridge': creates a new network stack for the container on the docker bridge, 'none': no networking for this container, 'container:': reuses another container network stack), 'host': use the host network stack inside the container") + flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container\n'bridge': creates a new network stack for the container on the docker bridge\n'none': no networking for this container\n'container:': reuses another container network stack\n'host': use the host network stack inside the contaner") // For documentation purpose _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") _ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container") @@ -74,7 +74,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a line delimited file of ENV variables") - cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat)) + cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host\nformat: %s\n(use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat)) cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host") cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers") cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom dns search domains") From cfb232cff27da2dd46ec04a5bf6699ab1d1df91c Mon Sep 17 00:00:00 
2001 From: Sven Dowideit Date: Wed, 7 May 2014 09:40:49 +1000 Subject: [PATCH 215/219] update the docs to reflect the nice \n handling Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/reference/commandline/cli.md | 60 +++++++++++++---------- docs/sources/reference/run.md | 5 ++ 2 files changed, 38 insertions(+), 27 deletions(-) diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 8936bbe332..8e0507cbf8 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -809,33 +809,39 @@ Run a command in a new container Usage: docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] - -a, --attach=[]: Attach to stdin, stdout or stderr. - -c, --cpu-shares=0: CPU shares (relative weight) - --cidfile="": Write the container ID to the file - -d, --detach=false: Detached mode: Run container in the background, print new container id - --dns=[]: Set custom dns servers - --dns-search=[]: Set custom dns search domains - -e, --env=[]: Set environment variables - --entrypoint="": Overwrite the default entrypoint of the image - --env-file=[]: Read in a line delimited file of ENV variables - --expose=[]: Expose a port from the container without publishing it to your host - -h, --hostname="": Container host name - -i, --interactive=false: Keep stdin open even if not attached - --link=[]: Add link to another container (name:alias) - --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" - -m, --memory="": Memory limit (format: , where unit = b, k, m or g) - --name="": Assign a name to the container - --net="bridge": Set the Network mode for the container ('bridge': creates a new network stack for the container on the docker bridge, 'none': no networking for this container, 'container:': reuses another container network stack), 'host': use the host network stack inside the container - -P, --publish-all=false: Publish all exposed ports to the host interfaces - -p, --publish=[]: Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping) - --privileged=false: Give extended privileges to this container - --rm=false: Automatically remove the container when it exits (incompatible with -d) - --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) - -t, --tty=false: Allocate a pseudo-tty - -u, --user="": Username or UID - -v, --volume=[]: Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container) - --volumes-from=[]: Mount volumes from the specified container(s) - -w, --workdir="": Working directory inside the container + -a, --attach=[] Attach to stdin, stdout or stderr. 
+ -c, --cpu-shares=0 CPU shares (relative weight) + --cidfile="" Write the container ID to the file + -d, --detach=false Detached mode: Run container in the background, print new container id + --dns=[] Set custom dns servers + --dns-search=[] Set custom dns search domains + -e, --env=[] Set environment variables + --entrypoint="" Overwrite the default entrypoint of the image + --env-file=[] Read in a line delimited file of ENV variables + --expose=[] Expose a port from the container without publishing it to your host + -h, --hostname="" Container host name + -i, --interactive=false Keep stdin open even if not attached + --link=[] Add link to another container (name:alias) + --lxc-conf=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" + -m, --memory="" Memory limit (format: , where unit = b, k, m or g) + --name="" Assign a name to the container + --net="bridge" Set the Network mode for the container + 'bridge': creates a new network stack for the container on the docker bridge + 'none': no networking for this container + 'container:': reuses another container network stack + 'host': use the host network stack inside the contaner + -p, --publish=[] Publish a container's port to the host + format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort + (use 'docker port' to see the actual mapping) + -P, --publish-all=false Publish all exposed ports to the host interfaces + --privileged=false Give extended privileges to this container + --rm=false Automatically remove the container when it exits (incompatible with -d) + --sig-proxy=true Proxify all received signal to the process (even in non-tty mode) + -t, --tty=false Allocate a pseudo-tty + -u, --user="" Username or UID + -v, --volume=[] Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container) + --volumes-from=[] Mount volumes from the specified container(s) + -w, --workdir="" Working directory inside the container The `docker run` command first `creates` a writeable container layer over the specified image, and then `starts` it using the specified command. That is, diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md index b3415330fe..09c2b642a1 100644 --- a/docs/sources/reference/run.md +++ b/docs/sources/reference/run.md @@ -138,6 +138,11 @@ PID files): --dns=[] : Set custom dns servers for the container --net="bridge": Set the Network mode for the container ('bridge': creates a new network stack for the container on the docker bridge, 'none': no networking for this container, 'container:': reuses another container network stack), 'host': use the host network stack inside the container + --net="bridge" Set the Network mode for the container + 'bridge': creates a new network stack for the container on the docker bridge + 'none': no networking for this container + 'container:': reuses another container network stack + 'host': use the host network stack inside the contaner By default, all containers have networking enabled and they can make any outgoing connections. 
The operator can completely disable networking From 62e8ddb5791b9ee62c3f4361084dda4a5d7760e1 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 6 May 2014 17:04:04 -0700 Subject: [PATCH 216/219] Set container pid for process in native driver Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemon/execdriver/native/driver.go | 1 + 1 file changed, 1 insertion(+) diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go index e674d57333..2e57729d4b 100644 --- a/daemon/execdriver/native/driver.go +++ b/daemon/execdriver/native/driver.go @@ -122,6 +122,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba return &c.Cmd }, func() { if startCallback != nil { + c.ContainerPid = c.Process.Pid startCallback(c) } }) From 9e64a4d862e38ead4c2708ce61663f176844406b Mon Sep 17 00:00:00 2001 From: Jonathan McCrohan Date: Wed, 7 May 2014 00:30:17 +0100 Subject: [PATCH 217/219] mkimage-debootstrap: set ubuntuLatestLTS as Trusty Ubuntu 14.04 LTS (Trusty Tahr) was released on Thu, 17 Apr 2014; Update ubuntuLatestLTS accordingly. Docker-DCO-1.1-Signed-off-by: Jonathan McCrohan (github: jmccrohan) --- contrib/mkimage-debootstrap.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/mkimage-debootstrap.sh b/contrib/mkimage-debootstrap.sh index 33ba7b07cb..613066e16b 100755 --- a/contrib/mkimage-debootstrap.sh +++ b/contrib/mkimage-debootstrap.sh @@ -43,7 +43,7 @@ usage() { debianStable=wheezy debianUnstable=sid # this should match the name found at http://releases.ubuntu.com/ -ubuntuLatestLTS=precise +ubuntuLatestLTS=trusty # this should match the name found at http://releases.tanglu.org/ tangluLatest=aequorea From 15209c380c3f510e3f8d5ba1ff5fcc5cc8db3357 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 6 May 2014 18:03:41 -0700 Subject: [PATCH 218/219] Bump version to v0.11.0 Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- CHANGELOG.md | 12 ++++++++++++ VERSION | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8743d3a7db..bd6dc6026e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## 0.11.0 (2014-05-07) + +#### Notable features since 0.10.0 + +* SELinux support for mount and process labels +* Linked containers can be accessed by hostname +* Use the net `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces +* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon +* Logs can now be returned with an optional timestamp +* Docker now works with registries that support SHA-512 +* Multiple registry endpoints are supported to allow registry mirrors + ## 0.10.0 (2014-04-08) #### Builder diff --git a/VERSION b/VERSION index 29b2d3ea50..d9df1bbc0c 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.10.0-dev +0.11.0 From 1ed63f1d16669dc2ce7eb75f3d56ea8a5c27ab39 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 7 May 2014 11:38:02 -0700 Subject: [PATCH 219/219] Change version to v0.11.0-dev Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index d9df1bbc0c..eb1336c84d 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.11.0 +0.11.0-dev
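The "Linked containers can be accessed by hostname" entry in the 0.11.0 changelog above is backed by the extended etchosts.Build signature introduced earlier in this series, which appends an alias-to-IP entry for each link. A minimal sketch of calling that new signature directly (the hostnames and addresses here are made up for illustration, and the snippet assumes it is built inside the Docker source tree so the pkg/networkfs/etchosts import resolves):

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"os"

    	"github.com/dotcloud/docker/pkg/networkfs/etchosts"
    )

    func main() {
    	// Extra entries play the role of --link aliases: alias -> linked container IP.
    	extra := map[string]string{"db": "172.17.0.2"}

    	f, err := ioutil.TempFile("", "hosts")
    	if err != nil {
    		panic(err)
    	}
    	defer os.Remove(f.Name())

    	// Write the container's hosts file: its own hostname plus the link alias.
    	if err := etchosts.Build(f.Name(), "172.17.0.3", "web", "", &extra); err != nil {
    		panic(err)
    	}

    	content, err := ioutil.ReadFile(f.Name())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Print(string(content))
    }

The resulting file should contain the 172.17.0.3 entry for "web", the default localhost and IPv6 entries, and a 172.17.0.2 entry for "db" contributed by extraContent.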