diff --git a/.gitignore b/.gitignore
index cd641d8ffe..61a00fe77c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
*.exe
*.orig
+*.rej
*.test
.*.swp
.DS_Store
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d168ad280a..0894d62f1e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,22 @@
# Changelog
+## 1.6.2 (2015-05-13)
+
+#### Runtime
+- Revert change prohibiting mounting into /sys
+
+## 1.6.1 (2015-05-07)
+
+#### Security
+- Fix read/write /proc paths (CVE-2015-3630)
+- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631)
+- Fix opening of file-descriptor 1 (CVE-2015-3627)
+- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629)
+- Prohibit mount of /sys
+
+#### Runtime
+- Update Apparmor policy to not allow mounts
+
## 1.6.0 (2015-04-07)
#### Builder
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 395f259234..d9068d1d95 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -129,12 +129,12 @@ However, there might be a way to implement that feature *on top of* Docker.
- Internet Relay Chat (IRC)
+ | Internet Relay Chat (IRC) |
IRC a direct line to our most knowledgeable Docker users; we have
- both the #docker and #docker-dev group on
- irc.freenode.net.
+ both the #docker and #docker-dev group on
+ irc.freenode.net.
IRC is a rich chat protocol but it can overwhelm new users. You can search
our chat archives.
@@ -146,9 +146,9 @@ However, there might be a way to implement that feature *on top of* Docker.
|
There are two groups.
Docker-user
- is for people using Docker containers.
- The docker-dev
- group is for contributors and other people contributing to the Docker
+ is for people using Docker containers.
+ The docker-dev
+ group is for contributors and other people contributing to the Docker
project.
|
@@ -156,14 +156,14 @@ However, there might be a way to implement that feature *on top of* Docker.
Twitter |
You can follow Docker's Twitter feed
- to get updates on our products. You can also tweet us questions or just
+ to get updates on our products. You can also tweet us questions or just
share blogs or stories.
|
Stack Overflow |
- Stack Overflow has over 7000K Docker questions listed. We regularly
+ Stack Overflow has over 7000K Docker questions listed. We regularly
monitor Docker questions
and so do many other knowledgeable Docker users.
|
diff --git a/Dockerfile b/Dockerfile
index 6be471a7bb..a74712adec 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -26,6 +26,9 @@
FROM ubuntu:14.04
MAINTAINER Tianon Gravi (@tianon)
+RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61
+RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list
+
# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \
@@ -50,6 +53,8 @@ RUN apt-get update && apt-get install -y \
ruby1.9.1 \
ruby1.9.1-dev \
s3cmd=1.1.0* \
+ ubuntu-zfs \
+ libzfs-dev \
--no-install-recommends
# Get lvm2 source for compiling statically
@@ -121,7 +126,8 @@ RUN set -x \
&& git clone https://github.com/docker/distribution.git /go/src/github.com/docker/distribution \
&& (cd /go/src/github.com/docker/distribution && git checkout -q $REGISTRY_COMMIT) \
&& GOPATH=/go/src/github.com/docker/distribution/Godeps/_workspace:/go \
- go build -o /go/bin/registry-v2 github.com/docker/distribution/cmd/registry
+ go build -o /go/bin/registry-v2 github.com/docker/distribution/cmd/registry \
+ && rm -rf /go/src/github.com/docker/distribution/
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT 91985b239764fe54714fa0a93d52aa362357d251
@@ -157,23 +163,25 @@ RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
COPY contrib/download-frozen-image.sh /go/src/github.com/docker/docker/contrib/
RUN ./contrib/download-frozen-image.sh /docker-frozen-images \
busybox:latest@4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 \
- hello-world:frozen@e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5
+ hello-world:frozen@e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5 \
+ jess/unshare@5c9f6ea50341a2a8eb6677527f2bdedbf331ae894a41714fda770fb130f3314d
# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
-# Install man page generator
-COPY vendor /go/src/github.com/docker/docker/vendor
-# (copy vendor/ because go-md2man needs golang.org/x/net)
+# Download man page generator
RUN set -x \
&& git clone -b v1.0.1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \
- && git clone -b v1.2 https://github.com/russross/blackfriday.git /go/src/github.com/russross/blackfriday \
- && go install -v github.com/cpuguy83/go-md2man
+ && git clone -b v1.2 https://github.com/russross/blackfriday.git /go/src/github.com/russross/blackfriday
-# install toml validator
+# Download toml validator
ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
RUN set -x \
&& git clone https://github.com/BurntSushi/toml.git /go/src/github.com/BurntSushi/toml \
- && (cd /go/src/github.com/BurntSushi/toml && git checkout -q $TOMLV_COMMIT) \
- && go install -v github.com/BurntSushi/toml/cmd/tomlv
+ && (cd /go/src/github.com/BurntSushi/toml && git checkout -q $TOMLV_COMMIT)
+
+# copy vendor/ because go-md2man needs golang.org/x/net
+COPY vendor /go/src/github.com/docker/docker/vendor
+RUN go install -v github.com/cpuguy83/go-md2man \
+ github.com/BurntSushi/toml/cmd/tomlv
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
diff --git a/MAINTAINERS b/MAINTAINERS
index 5c02fa671b..0e06d8818d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -296,7 +296,9 @@ made through a pull request.
[Org.Operators.security]
people = [
- "erw"
+ "erw",
+ "diogomonica",
+ "nathanmccauley"
]
[Org.Operators."monthly meetings"]
@@ -312,6 +314,11 @@ made through a pull request.
"jfrazelle",
"crosbymichael"
]
+
+ [Org.Operators.community]
+ people = [
+ "theadactyl"
+ ]
# The chief maintainer is responsible for all aspects of quality for the project including
# code reviews, usability, stability, security, performance, etc.
@@ -319,6 +326,17 @@ made through a pull request.
# day of a new maintainer, the best advice should be "follow the C.M.'s example and you'll
# be fine".
"Chief Maintainer" = "crosbymichael"
+
+ # The community manager is responsible for serving the project community, including users,
+ # contributors and partners. This involves:
+ # - facilitating communication between maintainers, contributors and users
+ # - organizing contributor and maintainer events
+ # - helping new contributors get involved
+ # - anything the project community needs to be successful
+ #
+ # The community manager is a point of contact for any contributor who has questions, concerns
+ # or feedback about project operations.
+ "Community Manager" = "theadactyl"
[Org."Core maintainers"]
@@ -345,6 +363,7 @@ made through a pull request.
"icecrime",
"jfrazelle",
"lk4d4",
+ "runcom",
"tibor",
"unclejack",
"vbatts",
@@ -365,43 +384,43 @@ made through a pull request.
# 1. Exposing a clear road map for improving their subsystem.
# 2. Deliver prompt feedback and decisions on pull requests affecting their subsystem.
# 3. Be available to anyone with questions, bug reports, criticism etc.
- # on their component. This includes IRC, GitHub requests and the mailing
- # list.
+ # on their component. This includes IRC, GitHub requests and the mailing
+ # list.
# 4. Make sure their subsystem respects the philosophy, design and
- # road map of the project.
+ # road map of the project.
#
# #### How to review patches to your subsystem
#
# Accepting pull requests:
#
- # - If the pull request appears to be ready to merge, give it a `LGTM`, which
- # stands for "Looks Good To Me".
- # - If the pull request has some small problems that need to be changed, make
- # a comment adressing the issues.
- # - If the changes needed to a PR are small, you can add a "LGTM once the
- # following comments are adressed..." this will reduce needless back and
- # forth.
- # - If the PR only needs a few changes before being merged, any MAINTAINER can
- # make a replacement PR that incorporates the existing commits and fixes the
- # problems before a fast track merge.
+ # - If the pull request appears to be ready to merge, give it a `LGTM`, which
+ # stands for "Looks Good To Me".
+ # - If the pull request has some small problems that need to be changed, make
+	#    a comment addressing the issues.
+ # - If the changes needed to a PR are small, you can add a "LGTM once the
+	#    following comments are addressed..." this will reduce needless back and
+ # forth.
+ # - If the PR only needs a few changes before being merged, any MAINTAINER can
+ # make a replacement PR that incorporates the existing commits and fixes the
+ # problems before a fast track merge.
#
# Closing pull requests:
#
- # - If a PR appears to be abandoned, after having attempted to contact the
- # original contributor, then a replacement PR may be made. Once the
- # replacement PR is made, any contributor may close the original one.
- # - If you are not sure if the pull request implements a good feature or you
- # do not understand the purpose of the PR, ask the contributor to provide
- # more documentation. If the contributor is not able to adequately explain
- # the purpose of the PR, the PR may be closed by any MAINTAINER.
- # - If a MAINTAINER feels that the pull request is sufficiently architecturally
- # flawed, or if the pull request needs significantly more design discussion
- # before being considered, the MAINTAINER should close the pull request with
- # a short explanation of what discussion still needs to be had. It is
- # important not to leave such pull requests open, as this will waste both the
- # MAINTAINER's time and the contributor's time. It is not good to string a
- # contributor on for weeks or months, having them make many changes to a PR
- # that will eventually be rejected.
+ # - If a PR appears to be abandoned, after having attempted to contact the
+ # original contributor, then a replacement PR may be made. Once the
+ # replacement PR is made, any contributor may close the original one.
+ # - If you are not sure if the pull request implements a good feature or you
+ # do not understand the purpose of the PR, ask the contributor to provide
+ # more documentation. If the contributor is not able to adequately explain
+ # the purpose of the PR, the PR may be closed by any MAINTAINER.
+ # - If a MAINTAINER feels that the pull request is sufficiently architecturally
+ # flawed, or if the pull request needs significantly more design discussion
+ # before being considered, the MAINTAINER should close the pull request with
+ # a short explanation of what discussion still needs to be had. It is
+ # important not to leave such pull requests open, as this will waste both the
+ # MAINTAINER's time and the contributor's time. It is not good to string a
+ # contributor on for weeks or months, having them make many changes to a PR
+ # that will eventually be rejected.
[Org.Subsystems.Documentation]
@@ -527,6 +546,11 @@ made through a pull request.
Email = "crosbymichael@gmail.com"
GitHub = "crosbymichael"
+ [people.diogomonica]
+ Name = "Diogo Monica"
+ Email = "diogo@docker.com"
+ GitHub = "diogomonica"
+
[people.duglin]
Name = "Doug Davis"
Email = "dug@us.ibm.com"
@@ -574,7 +598,7 @@ made through a pull request.
[people.jfrazelle]
Name = "Jessie Frazelle"
- Email = "jess@docker.com"
+ Email = "j@docker.com"
GitHub = "jfrazelle"
[people.jlhawn]
@@ -592,6 +616,16 @@ made through a pull request.
Email = "mary.anthony@docker.com"
GitHub = "moxiegirl"
+ [people.nathanmccauley]
+ Name = "Nathan McCauley"
+ Email = "nathan.mccauley@docker.com"
+ GitHub = "nathanmccauley"
+
+ [people.runcom]
+ Name = "Antonio Murdaca"
+ Email = "me@runcom.ninja"
+ GitHub = "runcom"
+
[people.sday]
Name = "Stephen Day"
Email = "stephen.day@docker.com"
@@ -616,6 +650,11 @@ made through a pull request.
Name = "Sebastiaan van Stijn"
Email = "github@gone.nl"
GitHub = "thaJeztah"
+
+ [people.theadactyl]
+ Name = "Thea Lamkin"
+ Email = "thea@docker.com"
+ GitHub = "theadactyl"
[people.tianon]
Name = "Tianon Gravi"
diff --git a/Makefile b/Makefile
index b60b2a4d00..d73c8c13aa 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate
+.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration-cli test-docker-py validate
# env vars passed through directly to Docker's build scripts
# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily
@@ -7,7 +7,10 @@ DOCKER_ENVS := \
-e BUILDFLAGS \
-e DOCKER_CLIENTONLY \
-e DOCKER_EXECDRIVER \
+ -e DOCKER_EXPERIMENTAL \
-e DOCKER_GRAPHDRIVER \
+ -e DOCKER_STORAGE_OPTS \
+ -e DOCKER_USERLANDPROXY \
-e TESTDIRS \
-e TESTFLAGS \
-e TIMEOUT
@@ -26,7 +29,7 @@ DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR))
DOCSPORT := 8000
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
+DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
@@ -62,14 +65,11 @@ docs-test: docs-build
$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./test.sh
test: build
- $(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli test-docker-py
+ $(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration-cli test-docker-py
test-unit: build
$(DOCKER_RUN_DOCKER) hack/make.sh test-unit
-test-integration: build
- $(DOCKER_RUN_DOCKER) hack/make.sh test-integration
-
test-integration-cli: build
$(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli
diff --git a/README.md b/README.md
index 5603a55a7f..ad15e56bc5 100644
--- a/README.md
+++ b/README.md
@@ -207,6 +207,53 @@ or want to get more involved, the best place to start is [the project directory]
We are always open to suggestions on process improvements, and are always looking for more maintainers.
+### Talking to other Docker users and contributors
+
+
+
+
+
+ Internet Relay Chat (IRC) |
+
+
+      IRC is a direct line to our most knowledgeable Docker users; we have
+ both the #docker and #docker-dev group on
+ irc.freenode.net.
+ IRC is a rich chat protocol but it can overwhelm new users. You can search
+ our chat archives.
+
+ Read our IRC quickstart guide for an easy way to get started.
+ |
+
+
+ Google Groups |
+
+ There are two groups.
+ Docker-user
+ is for people using Docker containers.
+ The docker-dev
+ group is for contributors and other people contributing to the Docker
+ project.
+ |
+
+
+ Twitter |
+
+ You can follow Docker's Twitter feed
+ to get updates on our products. You can also tweet us questions or just
+ share blogs or stories.
+ |
+
+
+ Stack Overflow |
+
+ Stack Overflow has over 7000K Docker questions listed. We regularly
+ monitor Docker questions
+ and so do many other knowledgeable Docker users.
+ |
+
+
+
### Legal
*Brought to you courtesy of our legal counsel. For more context,
diff --git a/api/client/build.go b/api/client/build.go
index e83de976be..b09597152a 100644
--- a/api/client/build.go
+++ b/api/client/build.go
@@ -17,9 +17,8 @@ import (
"strconv"
"strings"
- "github.com/Sirupsen/logrus"
"github.com/docker/docker/api"
- "github.com/docker/docker/graph"
+ "github.com/docker/docker/graph/tags"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/jsonmessage"
@@ -55,9 +54,11 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
flCPUShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
+ flCpuPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
flCpuQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
+ flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
cmd.Require(flag.Exact, 1)
cmd.ParseFlags(args, true)
@@ -190,14 +191,14 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
// windows: show error message about modified file permissions
// FIXME: this is not a valid warning when the daemon is running windows. should be removed once docker engine for windows can build.
if runtime.GOOS == "windows" {
- logrus.Warn(`SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
+ fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
}
var body io.Reader
// Setup an upload progress bar
// FIXME: ProgressReader shouldn't be this annoying to use
if context != nil {
- sf := streamformatter.NewStreamFormatter(false)
+ sf := streamformatter.NewStreamFormatter()
body = progressreader.New(progressreader.Config{
In: context,
Out: cli.out,
@@ -239,7 +240,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
return err
}
if len(tag) > 0 {
- if err := graph.ValidateTagName(tag); err != nil {
+ if err := tags.ValidateTagName(tag); err != nil {
return err
}
}
@@ -274,8 +275,10 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
v.Set("cpusetmems", *flCPUSetMems)
v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
v.Set("cpuquota", strconv.FormatInt(*flCpuQuota, 10))
+ v.Set("cpuperiod", strconv.FormatInt(*flCpuPeriod, 10))
v.Set("memory", strconv.FormatInt(memory, 10))
v.Set("memswap", strconv.FormatInt(memorySwap, 10))
+ v.Set("cgroupparent", *flCgroupParent)
v.Set("dockerfile", *dockerfileName)
@@ -289,7 +292,13 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
if context != nil {
headers.Set("Content-Type", "application/tar")
}
- err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers)
+ sopts := &streamOpts{
+ rawTerminal: true,
+ in: body,
+ out: cli.out,
+ headers: headers,
+ }
+ err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)
if jerr, ok := err.(*jsonmessage.JSONError); ok {
// If no error code is set, default to 1
if jerr.Code == 0 {
diff --git a/api/client/cli.go b/api/client/cli.go
index 600d4cc5a3..f78827dfa1 100644
--- a/api/client/cli.go
+++ b/api/client/cli.go
@@ -6,19 +6,18 @@ import (
"errors"
"fmt"
"io"
- "net"
"net/http"
"os"
"path/filepath"
"reflect"
"strings"
"text/template"
- "time"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/pkg/homedir"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/term"
+ "github.com/docker/docker/utils"
)
// DockerCli represents the docker command line client.
@@ -63,6 +62,14 @@ var funcMap = template.FuncMap{
},
}
+func (cli *DockerCli) Out() io.Writer {
+ return cli.out
+}
+
+func (cli *DockerCli) Err() io.Writer {
+ return cli.err
+}
+
func (cli *DockerCli) getMethod(args ...string) (func(...string) error, bool) {
camelArgs := make([]string, len(args))
for i, s := range args {
@@ -90,8 +97,7 @@ func (cli *DockerCli) Cmd(args ...string) error {
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
if !exists {
- fmt.Fprintf(cli.err, "docker: '%s' is not a docker command. See 'docker --help'.\n", args[0])
- os.Exit(1)
+ return fmt.Errorf("docker: '%s' is not a docker command. See 'docker --help'.", args[0])
}
return method(args[1:]...)
}
@@ -171,19 +177,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, a
tr := &http.Transport{
TLSClientConfig: tlsConfig,
}
-
- // Why 32? See https://github.com/docker/docker/pull/8035.
- timeout := 32 * time.Second
- if proto == "unix" {
- // No need for compression in local communications.
- tr.DisableCompression = true
- tr.Dial = func(_, _ string) (net.Conn, error) {
- return net.DialTimeout(proto, addr, timeout)
- }
- } else {
- tr.Proxy = http.ProxyFromEnvironment
- tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
- }
+ utils.ConfigureTCPTransport(tr, proto, addr)
configFile, e := cliconfig.Load(filepath.Join(homedir.Get(), ".docker"))
if e != nil {
diff --git a/api/client/cp.go b/api/client/cp.go
index 392e362929..d195601ba6 100644
--- a/api/client/cp.go
+++ b/api/client/cp.go
@@ -16,7 +16,7 @@ import (
//
// Usage: docker cp CONTAINER:PATH HOSTDIR
func (cli *DockerCli) CmdCp(args ...string) error {
- cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTDIR|-", "Copy files/folders from a PATH on the container to a HOSTDIR on the host\nrunning the command. Use '-' to write the data\nas a tar file to STDOUT.", true)
+ cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTDIR|-", "Copy files/folders from a PATH on the container to a HOSTDIR on the host\nrunning the command. Use '-' to write the data as a tar file to STDOUT.", true)
cmd.Require(flag.Exact, 2)
cmd.ParseFlags(args, true)
diff --git a/api/client/create.go b/api/client/create.go
index b0819a05d7..a59c09cd62 100644
--- a/api/client/create.go
+++ b/api/client/create.go
@@ -10,7 +10,7 @@ import (
"strings"
"github.com/docker/docker/api/types"
- "github.com/docker/docker/graph"
+ "github.com/docker/docker/graph/tags"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
@@ -26,7 +26,7 @@ func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error {
repos, tag := parsers.ParseRepositoryTag(image)
// pull only the image tagged 'latest' if no tag was specified
if tag == "" {
- tag = graph.DEFAULTTAG
+ tag = tags.DEFAULTTAG
}
v.Set("fromImage", repos)
v.Set("tag", tag)
@@ -47,7 +47,12 @@ func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error {
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
- if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, out, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil {
+ sopts := &streamOpts{
+ rawTerminal: true,
+ out: out,
+ headers: map[string][]string{"X-Registry-Auth": registryAuthHeader},
+ }
+ if err := cli.stream("POST", "/images/create?"+v.Encode(), sopts); err != nil {
return err
}
return nil
@@ -95,7 +100,7 @@ func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runc
if statusCode == 404 && strings.Contains(err.Error(), config.Image) {
repo, tag := parsers.ParseRepositoryTag(config.Image)
if tag == "" {
- tag = graph.DEFAULTTAG
+ tag = tags.DEFAULTTAG
}
fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", utils.ImageReference(repo, tag))
diff --git a/api/client/events.go b/api/client/events.go
index 2154e0ccd0..75144b09c7 100644
--- a/api/client/events.go
+++ b/api/client/events.go
@@ -2,8 +2,6 @@ package client
import (
"net/url"
- "strconv"
- "time"
"github.com/docker/docker/opts"
flag "github.com/docker/docker/pkg/mflag"
@@ -26,7 +24,6 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
var (
v = url.Values{}
- loc = time.FixedZone(time.Now().Zone())
eventFilterArgs = filters.Args{}
)
@@ -39,22 +36,11 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
return err
}
}
- var setTime = func(key, value string) {
- format := timeutils.RFC3339NanoFixed
- if len(value) < len(format) {
- format = format[:len(value)]
- }
- if t, err := time.ParseInLocation(format, value, loc); err == nil {
- v.Set(key, strconv.FormatInt(t.Unix(), 10))
- } else {
- v.Set(key, value)
- }
- }
if *since != "" {
- setTime("since", *since)
+ v.Set("since", timeutils.GetTimestamp(*since))
}
if *until != "" {
- setTime("until", *until)
+ v.Set("until", timeutils.GetTimestamp(*until))
}
if len(eventFilterArgs) > 0 {
filterJSON, err := filters.ToParam(eventFilterArgs)
@@ -63,7 +49,11 @@ func (cli *DockerCli) CmdEvents(args ...string) error {
}
v.Set("filters", filterJSON)
}
- if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil {
+ sopts := &streamOpts{
+ rawTerminal: true,
+ out: cli.out,
+ }
+ if err := cli.stream("GET", "/events?"+v.Encode(), sopts); err != nil {
return err
}
return nil
diff --git a/api/client/exec.go b/api/client/exec.go
index 4bf53eaec2..f247ec5217 100644
--- a/api/client/exec.go
+++ b/api/client/exec.go
@@ -71,7 +71,7 @@ func (cli *DockerCli) CmdExec(args ...string) error {
defer func() {
logrus.Debugf("End of CmdExec(), Waiting for hijack to finish.")
if _, ok := <-hijacked; ok {
- logrus.Errorf("Hijack did not finish (chan still open)")
+ fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)")
}
}()
@@ -109,7 +109,7 @@ func (cli *DockerCli) CmdExec(args ...string) error {
if execConfig.Tty && cli.isTerminalIn {
if err := cli.monitorTtySize(execID, true); err != nil {
- logrus.Errorf("Error monitoring TTY size: %s", err)
+ fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
}
}
diff --git a/api/client/export.go b/api/client/export.go
index 1ff46f9b57..42b0834739 100644
--- a/api/client/export.go
+++ b/api/client/export.go
@@ -34,7 +34,11 @@ func (cli *DockerCli) CmdExport(args ...string) error {
}
image := cmd.Arg(0)
- if err := cli.stream("GET", "/containers/"+image+"/export", nil, output, nil); err != nil {
+ sopts := &streamOpts{
+ rawTerminal: true,
+ out: output,
+ }
+ if err := cli.stream("GET", "/containers/"+image+"/export", sopts); err != nil {
return err
}
diff --git a/api/client/help.go b/api/client/help.go
index e95387967a..8e1dc852b7 100644
--- a/api/client/help.go
+++ b/api/client/help.go
@@ -2,7 +2,6 @@ package client
import (
"fmt"
- "os"
flag "github.com/docker/docker/pkg/mflag"
)
@@ -23,12 +22,10 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
if !exists {
- fmt.Fprintf(cli.err, "docker: '%s' is not a docker command. See 'docker --help'.\n", args[0])
- os.Exit(1)
- } else {
- method("--help")
- return nil
+ return fmt.Errorf("docker: '%s' is not a docker command. See 'docker --help'.", args[0])
}
+ method("--help")
+ return nil
}
flag.Usage()
diff --git a/api/client/import.go b/api/client/import.go
index a6cc4cdc7e..48c56896af 100644
--- a/api/client/import.go
+++ b/api/client/import.go
@@ -54,5 +54,11 @@ func (cli *DockerCli) CmdImport(args ...string) error {
in = cli.in
}
- return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil)
+ sopts := &streamOpts{
+ rawTerminal: true,
+ in: in,
+ out: cli.out,
+ }
+
+ return cli.stream("POST", "/images/create?"+v.Encode(), sopts)
}
diff --git a/api/client/info.go b/api/client/info.go
index 432ccac40f..9984f23020 100644
--- a/api/client/info.go
+++ b/api/client/info.go
@@ -3,7 +3,6 @@ package client
import (
"encoding/json"
"fmt"
- "os"
"github.com/docker/docker/api/types"
flag "github.com/docker/docker/pkg/mflag"
@@ -45,9 +44,8 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
fmt.Fprintf(cli.out, "Name: %s\n", info.Name)
fmt.Fprintf(cli.out, "ID: %s\n", info.ID)
- if info.Debug || os.Getenv("DEBUG") != "" {
+ if info.Debug {
fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug)
- fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "")
fmt.Fprintf(cli.out, "File Descriptors: %d\n", info.NFd)
fmt.Fprintf(cli.out, "Goroutines: %d\n", info.NGoroutines)
fmt.Fprintf(cli.out, "System Time: %s\n", info.SystemTime)
@@ -90,5 +88,9 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
}
}
+ if info.ExperimentalBuild {
+ fmt.Fprintf(cli.out, "Experimental: true\n")
+ }
+
return nil
}
diff --git a/api/client/inspect.go b/api/client/inspect.go
index 0f327cb4db..eb8565b823 100644
--- a/api/client/inspect.go
+++ b/api/client/inspect.go
@@ -15,7 +15,6 @@ import (
// CmdInspect displays low-level information on one or more containers or images.
//
// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...]
-
func (cli *DockerCli) CmdInspect(args ...string) error {
cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container or image", true)
tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template")
@@ -27,7 +26,6 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
if *tmplStr != "" {
var err error
if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil {
- fmt.Fprintf(cli.err, "Template parsing error: %v\n", err)
return StatusError{StatusCode: 64,
Status: "Template parsing error: " + err.Error()}
}
@@ -61,7 +59,8 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
continue
}
} else {
- dec := json.NewDecoder(bytes.NewReader(obj))
+ rdr := bytes.NewReader(obj)
+ dec := json.NewDecoder(rdr)
if isImage {
inspPtr := types.ImageInspect{}
@@ -71,7 +70,14 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
continue
}
if err := tmpl.Execute(cli.out, inspPtr); err != nil {
- return err
+ rdr.Seek(0, 0)
+ var raw interface{}
+ if err := dec.Decode(&raw); err != nil {
+ return err
+ }
+ if err = tmpl.Execute(cli.out, raw); err != nil {
+ return err
+ }
}
} else {
inspPtr := types.ContainerJSON{}
@@ -81,8 +87,14 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
continue
}
if err := tmpl.Execute(cli.out, inspPtr); err != nil {
- return err
-
+ rdr.Seek(0, 0)
+ var raw interface{}
+ if err := dec.Decode(&raw); err != nil {
+ return err
+ }
+ if err = tmpl.Execute(cli.out, raw); err != nil {
+ return err
+ }
}
}
cli.out.Write([]byte{'\n'})
diff --git a/api/client/kill.go b/api/client/kill.go
index 7ad1e56133..becff3b7e0 100644
--- a/api/client/kill.go
+++ b/api/client/kill.go
@@ -16,14 +16,17 @@ func (cli *DockerCli) CmdKill(args ...string) error {
cmd.ParseFlags(args, true)
- var encounteredError error
+ var errNames []string
for _, name := range cmd.Args() {
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, nil)); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
- encounteredError = fmt.Errorf("Error: failed to kill one or more containers")
+ errNames = append(errNames, name)
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
- return encounteredError
+ if len(errNames) > 0 {
+ return fmt.Errorf("Error: failed to kill containers: %v", errNames)
+ }
+ return nil
}
diff --git a/api/client/load.go b/api/client/load.go
index 7338c770d6..8dd8bb5469 100644
--- a/api/client/load.go
+++ b/api/client/load.go
@@ -29,7 +29,12 @@ func (cli *DockerCli) CmdLoad(args ...string) error {
return err
}
}
- if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil {
+ sopts := &streamOpts{
+ rawTerminal: true,
+ in: input,
+ out: cli.out,
+ }
+ if err := cli.stream("POST", "/images/load", sopts); err != nil {
return err
}
return nil
diff --git a/api/client/logs.go b/api/client/logs.go
index 5e5dd9dd8b..00369dbd86 100644
--- a/api/client/logs.go
+++ b/api/client/logs.go
@@ -7,6 +7,7 @@ import (
"github.com/docker/docker/api/types"
flag "github.com/docker/docker/pkg/mflag"
+ "github.com/docker/docker/pkg/timeutils"
)
// CmdLogs fetches the logs of a given container.
@@ -16,6 +17,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
var (
cmd = cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container", true)
follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
+ since = cmd.String([]string{"-since"}, "", "Show logs since timestamp")
times = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
tail = cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs")
)
@@ -35,14 +37,18 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
return err
}
- if c.HostConfig.LogConfig.Type != "json-file" {
- return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver")
+ if logType := c.HostConfig.LogConfig.Type; logType != "json-file" {
+ return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver (got: %s)", logType)
}
v := url.Values{}
v.Set("stdout", "1")
v.Set("stderr", "1")
+ if *since != "" {
+ v.Set("since", timeutils.GetTimestamp(*since))
+ }
+
if *times {
v.Set("timestamps", "1")
}
@@ -52,5 +58,11 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
}
v.Set("tail", *tail)
- return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), c.Config.Tty, nil, cli.out, cli.err, nil)
+ sopts := &streamOpts{
+ rawTerminal: c.Config.Tty,
+ out: cli.out,
+ err: cli.err,
+ }
+
+ return cli.stream("GET", "/containers/"+name+"/logs?"+v.Encode(), sopts)
}
diff --git a/api/client/pause.go b/api/client/pause.go
index 6c807410ba..2f8f3c83f1 100644
--- a/api/client/pause.go
+++ b/api/client/pause.go
@@ -14,14 +14,17 @@ func (cli *DockerCli) CmdPause(args ...string) error {
cmd.Require(flag.Min, 1)
cmd.ParseFlags(args, false)
- var encounteredError error
+ var errNames []string
for _, name := range cmd.Args() {
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/pause", name), nil, nil)); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
- encounteredError = fmt.Errorf("Error: failed to pause container named %s", name)
+ errNames = append(errNames, name)
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
- return encounteredError
+ if len(errNames) > 0 {
+ return fmt.Errorf("Error: failed to pause containers: %v", errNames)
+ }
+ return nil
}
diff --git a/api/client/pull.go b/api/client/pull.go
index 17abe4bb65..4be30b4e6f 100644
--- a/api/client/pull.go
+++ b/api/client/pull.go
@@ -4,7 +4,7 @@ import (
"fmt"
"net/url"
- "github.com/docker/docker/graph"
+ "github.com/docker/docker/graph/tags"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/registry"
@@ -28,7 +28,7 @@ func (cli *DockerCli) CmdPull(args ...string) error {
)
taglessRemote, tag := parsers.ParseRepositoryTag(remote)
if tag == "" && !*allTags {
- newRemote = utils.ImageReference(taglessRemote, graph.DEFAULTTAG)
+ newRemote = utils.ImageReference(taglessRemote, tags.DEFAULTTAG)
}
if tag != "" && *allTags {
return fmt.Errorf("tag can't be used with --all-tags/-a")
diff --git a/api/client/restart.go b/api/client/restart.go
index 41b10676bd..c769fb6d27 100644
--- a/api/client/restart.go
+++ b/api/client/restart.go
@@ -21,15 +21,18 @@ func (cli *DockerCli) CmdRestart(args ...string) error {
v := url.Values{}
v.Set("t", strconv.Itoa(*nSeconds))
- var encounteredError error
+ var errNames []string
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, nil))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
- encounteredError = fmt.Errorf("Error: failed to restart one or more containers")
+ errNames = append(errNames, name)
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
- return encounteredError
+ if len(errNames) > 0 {
+ return fmt.Errorf("Error: failed to restart containers: %v", errNames)
+ }
+ return nil
}
diff --git a/api/client/rm.go b/api/client/rm.go
index 1ecc0d6572..e6f3aeaeba 100644
--- a/api/client/rm.go
+++ b/api/client/rm.go
@@ -3,6 +3,7 @@ package client
import (
"fmt"
"net/url"
+ "strings"
flag "github.com/docker/docker/pkg/mflag"
)
@@ -31,19 +32,23 @@ func (cli *DockerCli) CmdRm(args ...string) error {
val.Set("force", "1")
}
- var encounteredError error
+ var errNames []string
for _, name := range cmd.Args() {
if name == "" {
return fmt.Errorf("Container name cannot be empty")
}
+ name = strings.Trim(name, "/")
_, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, nil))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
- encounteredError = fmt.Errorf("Error: failed to remove one or more containers")
+ errNames = append(errNames, name)
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
- return encounteredError
+ if len(errNames) > 0 {
+ return fmt.Errorf("Error: failed to remove containers: %v", errNames)
+ }
+ return nil
}
diff --git a/api/client/rmi.go b/api/client/rmi.go
index a8590dc820..36f2036d13 100644
--- a/api/client/rmi.go
+++ b/api/client/rmi.go
@@ -29,17 +29,17 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
v.Set("noprune", "1")
}
- var encounteredError error
+ var errNames []string
for _, name := range cmd.Args() {
rdr, _, err := cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, nil)
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
- encounteredError = fmt.Errorf("Error: failed to remove one or more images")
+ errNames = append(errNames, name)
} else {
dels := []types.ImageDelete{}
if err := json.NewDecoder(rdr).Decode(&dels); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
- encounteredError = fmt.Errorf("Error: failed to remove one or more images")
+ errNames = append(errNames, name)
continue
}
@@ -52,5 +52,8 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
}
}
}
- return encounteredError
+ if len(errNames) > 0 {
+ return fmt.Errorf("Error: failed to remove images: %v", errNames)
+ }
+ return nil
}
diff --git a/api/client/run.go b/api/client/run.go
index 628e725f1b..10cf92459d 100644
--- a/api/client/run.go
+++ b/api/client/run.go
@@ -9,9 +9,9 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/promise"
- "github.com/docker/docker/pkg/resolvconf"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/runconfig"
+ "github.com/docker/libnetwork/resolvconf/dns"
)
func (cid *cidFile) Close() error {
@@ -38,7 +38,6 @@ func (cid *cidFile) Write(id string) error {
//
// Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
func (cli *DockerCli) CmdRun(args ...string) error {
- // FIXME: just use runconfig.Parse already
cmd := cli.Subcmd("run", "IMAGE [COMMAND] [ARG...]", "Run a command in a new container", true)
// These are flags not stored in Config/HostConfig
@@ -65,7 +64,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
// localhost regexp to warn if they are trying to
// set a DNS to a localhost address
for _, dnsIP := range hostConfig.Dns {
- if resolvconf.IsLocalhost(dnsIP) {
+ if dns.IsLocalhost(dnsIP) {
fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
break
}
@@ -123,7 +122,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
fmt.Fprintf(cli.out, "%s\n", createResponse.ID)
}()
}
- if *flAutoRemove && (hostConfig.RestartPolicy.Name == "always" || hostConfig.RestartPolicy.Name == "on-failure") {
+ if *flAutoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) {
return ErrConflictRestartPolicyAndAutoRemove
}
// We need to instantiate the chan because the select needs it. It can
@@ -133,7 +132,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
defer func() {
logrus.Debugf("End of CmdRun(), Waiting for hijack to finish.")
if _, ok := <-hijacked; ok {
- logrus.Errorf("Hijack did not finish (chan still open)")
+ fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)")
}
}()
if config.AttachStdin || config.AttachStdout || config.AttachStderr {
@@ -183,7 +182,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
defer func() {
if *flAutoRemove {
if _, _, err = readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, nil)); err != nil {
- logrus.Errorf("Error deleting container: %s", err)
+ fmt.Fprintf(cli.err, "Error deleting container: %s\n", err)
}
}
}()
@@ -195,7 +194,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
if err := cli.monitorTtySize(createResponse.ID, false); err != nil {
- logrus.Errorf("Error monitoring TTY size: %s", err)
+ fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
}
}
diff --git a/api/client/save.go b/api/client/save.go
index 5d9d276153..a04cbcf1e9 100644
--- a/api/client/save.go
+++ b/api/client/save.go
@@ -34,9 +34,14 @@ func (cli *DockerCli) CmdSave(args ...string) error {
return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.")
}
+ sopts := &streamOpts{
+ rawTerminal: true,
+ out: output,
+ }
+
if len(cmd.Args()) == 1 {
image := cmd.Arg(0)
- if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil {
+ if err := cli.stream("GET", "/images/"+image+"/get", sopts); err != nil {
return err
}
} else {
@@ -44,7 +49,7 @@ func (cli *DockerCli) CmdSave(args ...string) error {
for _, arg := range cmd.Args() {
v.Add("names", arg)
}
- if err := cli.stream("GET", "/images/get?"+v.Encode(), nil, output, nil); err != nil {
+ if err := cli.stream("GET", "/images/get?"+v.Encode(), sopts); err != nil {
return err
}
}
diff --git a/api/client/start.go b/api/client/start.go
index d3dec9489d..40f84d7cf3 100644
--- a/api/client/start.go
+++ b/api/client/start.go
@@ -1,13 +1,14 @@
package client
import (
+ "encoding/json"
"fmt"
"io"
"net/url"
"os"
"github.com/Sirupsen/logrus"
- "github.com/docker/docker/engine"
+ "github.com/docker/docker/api/types"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/signal"
@@ -29,7 +30,7 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
}
}
if sig == "" {
- logrus.Errorf("Unsupported signal: %v. Discarding.", s)
+ fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s)
}
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, nil)); err != nil {
logrus.Debugf("Error sending signal: %s", err)
@@ -65,12 +66,12 @@ func (cli *DockerCli) CmdStart(args ...string) error {
return err
}
- env := engine.Env{}
- if err := env.Decode(stream); err != nil {
+ var c types.ContainerJSON
+ if err := json.NewDecoder(stream).Decode(&c); err != nil {
return err
}
- config := env.GetSubEnv("Config")
- tty = config.GetBool("Tty")
+
+ tty = c.Config.Tty
if !tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
@@ -82,7 +83,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
v := url.Values{}
v.Set("stream", "1")
- if *openStdin && config.GetBool("OpenStdin") {
+ if *openStdin && c.Config.OpenStdin {
v.Set("stdin", "1")
in = cli.in
}
@@ -95,7 +96,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
defer func() {
logrus.Debugf("CmdStart() returned, defer waiting for hijack to finish.")
if _, ok := <-hijacked; ok {
- logrus.Errorf("Hijack did not finish (chan still open)")
+ fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)")
}
cli.in.Close()
}()
@@ -119,6 +120,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
}
var encounteredError error
+ var errNames []string
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, nil))
if err != nil {
@@ -126,7 +128,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
// attach and openStdin is false means it could be starting multiple containers
// when a container start failed, show the error message and start next
fmt.Fprintf(cli.err, "%s\n", err)
- encounteredError = fmt.Errorf("Error: failed to start one or more containers")
+ errNames = append(errNames, name)
} else {
encounteredError = err
}
@@ -137,6 +139,9 @@ func (cli *DockerCli) CmdStart(args ...string) error {
}
}
+ if len(errNames) > 0 {
+ encounteredError = fmt.Errorf("Error: failed to start containers: %v", errNames)
+ }
if encounteredError != nil {
return encounteredError
}
@@ -144,7 +149,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
if *openStdin || *attach {
if tty && cli.isTerminalOut {
if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
- logrus.Errorf("Error monitoring TTY size: %s", err)
+ fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
}
}
if attchErr := <-cErr; attchErr != nil {
diff --git a/api/client/stats.go b/api/client/stats.go
index b2dd36d683..ba56982b9e 100644
--- a/api/client/stats.go
+++ b/api/client/stats.go
@@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
"io"
+ "net/url"
"sort"
"strings"
"sync"
@@ -27,10 +28,18 @@ type containerStats struct {
err error
}
-func (s *containerStats) Collect(cli *DockerCli) {
- stream, _, err := cli.call("GET", "/containers/"+s.Name+"/stats", nil, nil)
+func (s *containerStats) Collect(cli *DockerCli, streamStats bool) {
+ v := url.Values{}
+ if streamStats {
+ v.Set("stream", "1")
+ } else {
+ v.Set("stream", "0")
+ }
+ stream, _, err := cli.call("GET", "/containers/"+s.Name+"/stats?"+v.Encode(), nil, nil)
if err != nil {
+ s.mu.Lock()
s.err = err
+ s.mu.Unlock()
return
}
defer stream.Close()
@@ -67,6 +76,9 @@ func (s *containerStats) Collect(cli *DockerCli) {
previousCPU = v.CpuStats.CpuUsage.TotalUsage
previousSystem = v.CpuStats.SystemUsage
u <- nil
+ if !streamStats {
+ return
+ }
}
}()
for {
@@ -87,6 +99,9 @@ func (s *containerStats) Collect(cli *DockerCli) {
return
}
}
+ if !streamStats {
+ return
+ }
}
}
@@ -112,6 +127,7 @@ func (s *containerStats) Display(w io.Writer) error {
// Usage: docker stats CONTAINER [CONTAINER...]
func (cli *DockerCli) CmdStats(args ...string) error {
cmd := cli.Subcmd("stats", "CONTAINER [CONTAINER...]", "Display a live stream of one or more containers' resource usage statistics", true)
+ noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result")
cmd.Require(flag.Min, 1)
cmd.ParseFlags(args, true)
@@ -122,14 +138,16 @@ func (cli *DockerCli) CmdStats(args ...string) error {
w = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
)
printHeader := func() {
- io.WriteString(cli.out, "\033[2J")
- io.WriteString(cli.out, "\033[H")
+ if !*noStream {
+ fmt.Fprint(cli.out, "\033[2J")
+ fmt.Fprint(cli.out, "\033[H")
+ }
io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE/LIMIT\tMEM %\tNET I/O\n")
}
for _, n := range names {
s := &containerStats{Name: n}
cStats = append(cStats, s)
- go s.Collect(cli)
+ go s.Collect(cli, !*noStream)
}
// do a quick pause so that any failed connections for containers that do not exist are able to be
// evicted before we display the initial or default values.
@@ -149,7 +167,7 @@ func (cli *DockerCli) CmdStats(args ...string) error {
printHeader()
toRemove := []int{}
for i, s := range cStats {
- if err := s.Display(w); err != nil {
+ if err := s.Display(w); err != nil && !*noStream {
toRemove = append(toRemove, i)
}
}
@@ -161,6 +179,9 @@ func (cli *DockerCli) CmdStats(args ...string) error {
return nil
}
w.Flush()
+ if *noStream {
+ break
+ }
}
return nil
}
diff --git a/api/client/stop.go b/api/client/stop.go
index 08a1f5ba15..9551911ffd 100644
--- a/api/client/stop.go
+++ b/api/client/stop.go
@@ -23,15 +23,18 @@ func (cli *DockerCli) CmdStop(args ...string) error {
v := url.Values{}
v.Set("t", strconv.Itoa(*nSeconds))
- var encounteredError error
+ var errNames []string
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, nil))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
- encounteredError = fmt.Errorf("Error: failed to stop one or more containers")
+ errNames = append(errNames, name)
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
- return encounteredError
+ if len(errNames) > 0 {
+ return fmt.Errorf("Error: failed to stop containers: %v", errNames)
+ }
+ return nil
}
diff --git a/api/client/unpause.go b/api/client/unpause.go
index bcecb46336..dceeb23af9 100644
--- a/api/client/unpause.go
+++ b/api/client/unpause.go
@@ -14,14 +14,17 @@ func (cli *DockerCli) CmdUnpause(args ...string) error {
cmd.Require(flag.Min, 1)
cmd.ParseFlags(args, false)
- var encounteredError error
+ var errNames []string
for _, name := range cmd.Args() {
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/unpause", name), nil, nil)); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
- encounteredError = fmt.Errorf("Error: failed to unpause container named %s", name)
+ errNames = append(errNames, name)
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
- return encounteredError
+ if len(errNames) > 0 {
+ return fmt.Errorf("Error: failed to unpause containers: %v", errNames)
+ }
+ return nil
}
diff --git a/api/client/utils.go b/api/client/utils.go
index 7a52ad25f4..6fb9b256fd 100644
--- a/api/client/utils.go
+++ b/api/client/utils.go
@@ -22,7 +22,6 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/autogen/dockerversion"
"github.com/docker/docker/cliconfig"
- "github.com/docker/docker/engine"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/stdcopy"
@@ -42,18 +41,8 @@ func (cli *DockerCli) HTTPClient() *http.Client {
func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) {
params := bytes.NewBuffer(nil)
if data != nil {
- if env, ok := data.(engine.Env); ok {
- if err := env.Encode(params); err != nil {
- return nil, err
- }
- } else {
- buf, err := json.Marshal(data)
- if err != nil {
- return nil, err
- }
- if _, err := params.Write(buf); err != nil {
- return nil, err
- }
+ if err := json.NewEncoder(params).Encode(data); err != nil {
+ return nil, err
}
}
return params, nil
@@ -181,19 +170,23 @@ func (cli *DockerCli) call(method, path string, data interface{}, headers map[st
return body, statusCode, err
}
-func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
- return cli.streamHelper(method, path, true, in, out, nil, headers)
+type streamOpts struct {
+ rawTerminal bool
+ in io.Reader
+ out io.Writer
+ err io.Writer
+ headers map[string][]string
}
-func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in io.Reader, stdout, stderr io.Writer, headers map[string][]string) error {
- body, contentType, _, err := cli.clientRequest(method, path, in, headers)
+func (cli *DockerCli) stream(method, path string, opts *streamOpts) error {
+ body, contentType, _, err := cli.clientRequest(method, path, opts.in, opts.headers)
if err != nil {
return err
}
- return cli.streamBody(body, contentType, setRawTerminal, stdout, stderr)
+ return cli.streamBody(body, contentType, opts.rawTerminal, opts.out, opts.err)
}
-func (cli *DockerCli) streamBody(body io.ReadCloser, contentType string, setRawTerminal bool, stdout, stderr io.Writer) error {
+func (cli *DockerCli) streamBody(body io.ReadCloser, contentType string, rawTerminal bool, stdout, stderr io.Writer) error {
defer body.Close()
if api.MatchesContentType(contentType, "application/json") {
@@ -202,7 +195,7 @@ func (cli *DockerCli) streamBody(body io.ReadCloser, contentType string, setRawT
if stdout != nil || stderr != nil {
// When TTY is ON, use regular copy
var err error
- if setRawTerminal {
+ if rawTerminal {
_, err = io.Copy(stdout, body)
} else {
_, err = stdcopy.StdCopy(stdout, stderr, body)
diff --git a/api/client/version.go b/api/client/version.go
index 25a7e367e2..4e06a6c8ad 100644
--- a/api/client/version.go
+++ b/api/client/version.go
@@ -1,13 +1,13 @@
package client
import (
+ "encoding/json"
"fmt"
"runtime"
- "github.com/Sirupsen/logrus"
"github.com/docker/docker/api"
+ "github.com/docker/docker/api/types"
"github.com/docker/docker/autogen/dockerversion"
- "github.com/docker/docker/engine"
flag "github.com/docker/docker/pkg/mflag"
)
@@ -32,28 +32,24 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
}
fmt.Fprintf(cli.out, "OS/Arch (client): %s/%s\n", runtime.GOOS, runtime.GOARCH)
- body, _, err := readBody(cli.call("GET", "/version", nil, nil))
+ stream, _, err := cli.call("GET", "/version", nil, nil)
if err != nil {
return err
}
- out := engine.NewOutput()
- remoteVersion, err := out.AddEnv()
- if err != nil {
- logrus.Errorf("Error reading remote version: %s", err)
+ var v types.Version
+ if err := json.NewDecoder(stream).Decode(&v); err != nil {
+ fmt.Fprintf(cli.err, "Error reading remote version: %s\n", err)
return err
}
- if _, err := out.Write(body); err != nil {
- logrus.Errorf("Error reading remote version: %s", err)
- return err
+
+ fmt.Fprintf(cli.out, "Server version: %s\n", v.Version)
+ if v.ApiVersion != "" {
+ fmt.Fprintf(cli.out, "Server API version: %s\n", v.ApiVersion)
}
- out.Close()
- fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version"))
- if apiVersion := remoteVersion.Get("ApiVersion"); apiVersion != "" {
- fmt.Fprintf(cli.out, "Server API version: %s\n", apiVersion)
- }
- fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion"))
- fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
- fmt.Fprintf(cli.out, "OS/Arch (server): %s/%s\n", remoteVersion.Get("Os"), remoteVersion.Get("Arch"))
+ fmt.Fprintf(cli.out, "Go version (server): %s\n", v.GoVersion)
+ fmt.Fprintf(cli.out, "Git commit (server): %s\n", v.GitCommit)
+ fmt.Fprintf(cli.out, "OS/Arch (server): %s/%s\n", v.Os, v.Arch)
+
return nil
}
diff --git a/api/client/wait.go b/api/client/wait.go
index 8f34b24521..bfec19e24b 100644
--- a/api/client/wait.go
+++ b/api/client/wait.go
@@ -17,15 +17,18 @@ func (cli *DockerCli) CmdWait(args ...string) error {
cmd.ParseFlags(args, true)
- var encounteredError error
+ var errNames []string
for _, name := range cmd.Args() {
status, err := waitForExit(cli, name)
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
- encounteredError = fmt.Errorf("Error: failed to wait one or more containers")
+ errNames = append(errNames, name)
} else {
fmt.Fprintf(cli.out, "%d\n", status)
}
}
- return encounteredError
+ if len(errNames) > 0 {
+ return fmt.Errorf("Error: failed to wait containers: %v", errNames)
+ }
+ return nil
}
diff --git a/api/common.go b/api/common.go
index 4a9523cd45..743eb67091 100644
--- a/api/common.go
+++ b/api/common.go
@@ -3,13 +3,13 @@ package api
import (
"fmt"
"mime"
- "os"
"path/filepath"
"sort"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
+ "github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/version"
"github.com/docker/libtrust"
)
@@ -107,7 +107,8 @@ func MatchesContentType(contentType, expectedType string) bool {
// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
// otherwise generates a new one
func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
- if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
+ err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700)
+ if err != nil {
return nil, err
}
trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
diff --git a/api/server/form.go b/api/server/form.go
index af1cd2075e..75584df065 100644
--- a/api/server/form.go
+++ b/api/server/form.go
@@ -11,7 +11,7 @@ func boolValue(r *http.Request, k string) bool {
return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
}
-func int64Value(r *http.Request, k string) int64 {
+func int64ValueOrZero(r *http.Request, k string) int64 {
val, err := strconv.ParseInt(r.FormValue(k), 10, 64)
if err != nil {
return 0
diff --git a/api/server/form_test.go b/api/server/form_test.go
index 5cf6c82c14..caa9f1757c 100644
--- a/api/server/form_test.go
+++ b/api/server/form_test.go
@@ -33,7 +33,7 @@ func TestBoolValue(t *testing.T) {
}
}
-func TestInt64Value(t *testing.T) {
+func TestInt64ValueOrZero(t *testing.T) {
cases := map[string]int64{
"": 0,
"asdf": 0,
@@ -47,7 +47,7 @@ func TestInt64Value(t *testing.T) {
r, _ := http.NewRequest("POST", "", nil)
r.Form = v
- a := int64Value(r, "test")
+ a := int64ValueOrZero(r, "test")
if a != e {
t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a)
}
diff --git a/api/server/server.go b/api/server/server.go
index f962aa30d7..57ebebc0e0 100644
--- a/api/server/server.go
+++ b/api/server/server.go
@@ -1,9 +1,6 @@
package server
import (
- "runtime"
- "time"
-
"encoding/base64"
"encoding/json"
"fmt"
@@ -11,8 +8,10 @@ import (
"net"
"net/http"
"os"
+ "runtime"
"strconv"
"strings"
+ "time"
"code.google.com/p/go.net/websocket"
"github.com/gorilla/mux"
@@ -24,19 +23,20 @@ import (
"github.com/docker/docker/builder"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/daemon"
- "github.com/docker/docker/daemon/networkdriver/bridge"
- "github.com/docker/docker/engine"
"github.com/docker/docker/graph"
+ "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/parsers/filters"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/signal"
+ "github.com/docker/docker/pkg/sockets"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/version"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
+ "github.com/docker/libnetwork/portallocator"
)
type ServerConfig struct {
@@ -53,28 +53,29 @@ type ServerConfig struct {
}
type Server struct {
- daemon *daemon.Daemon
- cfg *ServerConfig
- router *mux.Router
- start chan struct{}
-
- // TODO: delete engine
- eng *engine.Engine
+ daemon *daemon.Daemon
+ cfg *ServerConfig
+ router *mux.Router
+ start chan struct{}
+ servers []serverCloser
}
-func New(cfg *ServerConfig, eng *engine.Engine) *Server {
+func New(cfg *ServerConfig) *Server {
srv := &Server{
cfg: cfg,
start: make(chan struct{}),
- eng: eng,
}
- r := createRouter(srv, eng)
+ r := createRouter(srv)
srv.router = r
return srv
}
-func (s *Server) SetDaemon(d *daemon.Daemon) {
- s.daemon = d
+func (s *Server) Close() {
+ for _, srv := range s.servers {
+ if err := srv.Close(); err != nil {
+ logrus.Error(err)
+ }
+ }
}
type serverCloser interface {
@@ -92,19 +93,15 @@ func (s *Server) ServeApi(protoAddrs []string) error {
if len(protoAddrParts) != 2 {
return fmt.Errorf("bad format, expected PROTO://ADDR")
}
+ srv, err := s.newServer(protoAddrParts[0], protoAddrParts[1])
+ if err != nil {
+ return err
+ }
+ s.servers = append(s.servers, srv)
+
go func(proto, addr string) {
logrus.Infof("Listening for HTTP on %s (%s)", proto, addr)
- srv, err := s.newServer(proto, addr)
- if err != nil {
- chErrors <- err
- return
- }
- s.eng.OnShutdown(func() {
- if err := srv.Close(); err != nil {
- logrus.Error(err)
- }
- })
- if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
+ if err := srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
err = nil
}
chErrors <- err
@@ -133,7 +130,7 @@ func (s *HttpServer) Close() error {
return s.l.Close()
}
-type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error
+type HttpApiFunc func(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error
func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
conn, _, err := w.(http.Hijacker).Hijack()
@@ -230,16 +227,7 @@ func writeJSON(w http.ResponseWriter, code int, v interface{}) error {
return json.NewEncoder(w).Encode(v)
}
-func streamJSON(out *engine.Output, w http.ResponseWriter, flush bool) {
- w.Header().Set("Content-Type", "application/json")
- if flush {
- out.Add(utils.NewWriteFlusher(w))
- } else {
- out.Add(w)
- }
-}
-
-func (s *Server) postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postAuth(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var config *cliconfig.AuthConfig
err := json.NewDecoder(r.Body).Decode(&config)
r.Body.Close()
@@ -255,7 +243,7 @@ func (s *Server) postAuth(eng *engine.Engine, version version.Version, w http.Re
})
}
-func (s *Server) getVersion(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getVersion(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Content-Type", "application/json")
v := &types.Version{
@@ -273,7 +261,7 @@ func (s *Server) getVersion(eng *engine.Engine, version version.Version, w http.
return writeJSON(w, http.StatusOK, v)
}
-func (s *Server) postContainersKill(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersKill(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
@@ -308,7 +296,7 @@ func (s *Server) postContainersKill(eng *engine.Engine, version version.Version,
return nil
}
-func (s *Server) postContainersPause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersPause(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
@@ -316,23 +304,16 @@ func (s *Server) postContainersPause(eng *engine.Engine, version version.Version
return err
}
- name := vars["name"]
- cont, err := s.daemon.Get(name)
- if err != nil {
+ if err := s.daemon.ContainerPause(vars["name"]); err != nil {
return err
}
- if err := cont.Pause(); err != nil {
- return fmt.Errorf("Cannot pause container %s: %s", name, err)
- }
- cont.LogEvent("pause")
-
w.WriteHeader(http.StatusNoContent)
return nil
}
-func (s *Server) postContainersUnpause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersUnpause(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
@@ -340,23 +321,16 @@ func (s *Server) postContainersUnpause(eng *engine.Engine, version version.Versi
return err
}
- name := vars["name"]
- cont, err := s.daemon.Get(name)
- if err != nil {
+ if err := s.daemon.ContainerUnpause(vars["name"]); err != nil {
return err
}
- if err := cont.Unpause(); err != nil {
- return fmt.Errorf("Cannot unpause container %s: %s", name, err)
- }
- cont.LogEvent("unpause")
-
w.WriteHeader(http.StatusNoContent)
return nil
}
-func (s *Server) getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getContainersExport(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
@@ -364,7 +338,7 @@ func (s *Server) getContainersExport(eng *engine.Engine, version version.Version
return s.daemon.ContainerExport(vars["name"], w)
}
-func (s *Server) getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getImagesJSON(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -381,31 +355,10 @@ func (s *Server) getImagesJSON(eng *engine.Engine, version version.Version, w ht
return err
}
- if version.GreaterThanOrEqualTo("1.7") {
- return writeJSON(w, http.StatusOK, images)
- }
-
- legacyImages := []types.LegacyImage{}
-
- for _, image := range images {
- for _, repoTag := range image.RepoTags {
- repo, tag := parsers.ParseRepositoryTag(repoTag)
- legacyImage := types.LegacyImage{
- Repository: repo,
- Tag: tag,
- ID: image.ID,
- Created: image.Created,
- Size: image.Size,
- VirtualSize: image.VirtualSize,
- }
- legacyImages = append(legacyImages, legacyImage)
- }
- }
-
- return writeJSON(w, http.StatusOK, legacyImages)
+ return writeJSON(w, http.StatusOK, images)
}
-func (s *Server) getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getInfo(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Content-Type", "application/json")
info, err := s.daemon.SystemInfo()
@@ -416,7 +369,7 @@ func (s *Server) getInfo(eng *engine.Engine, version version.Version, w http.Res
return writeJSON(w, http.StatusOK, info)
}
-func (s *Server) getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getEvents(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -470,7 +423,7 @@ func (s *Server) getEvents(eng *engine.Engine, version version.Version, w http.R
d := s.daemon
es := d.EventsService
w.Header().Set("Content-Type", "application/json")
- enc := json.NewEncoder(utils.NewWriteFlusher(w))
+ enc := json.NewEncoder(ioutils.NewWriteFlusher(w))
getContainerId := func(cn string) string {
c, err := d.Get(cn)
@@ -520,7 +473,7 @@ func (s *Server) getEvents(eng *engine.Engine, version version.Version, w http.R
}
}
-func (s *Server) getImagesHistory(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getImagesHistory(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
@@ -534,18 +487,12 @@ func (s *Server) getImagesHistory(eng *engine.Engine, version version.Version, w
return writeJSON(w, http.StatusOK, history)
}
-func (s *Server) getContainersChanges(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getContainersChanges(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
- name := vars["name"]
- cont, err := s.daemon.Get(name)
- if err != nil {
- return err
- }
-
- changes, err := cont.Changes()
+ changes, err := s.daemon.ContainerChanges(vars["name"])
if err != nil {
return err
}
@@ -553,11 +500,7 @@ func (s *Server) getContainersChanges(eng *engine.Engine, version version.Versio
return writeJSON(w, http.StatusOK, changes)
}
-func (s *Server) getContainersTop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
- if version.LessThan("1.4") {
- return fmt.Errorf("top was improved a lot since 1.3, Please upgrade your docker client.")
- }
-
+func (s *Server) getContainersTop(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
@@ -574,7 +517,7 @@ func (s *Server) getContainersTop(eng *engine.Engine, version version.Version, w
return writeJSON(w, http.StatusOK, procList)
}
-func (s *Server) getContainersJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getContainersJSON(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -603,7 +546,7 @@ func (s *Server) getContainersJSON(eng *engine.Engine, version version.Version,
return writeJSON(w, http.StatusOK, containers)
}
-func (s *Server) getContainersStats(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getContainersStats(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -611,10 +554,10 @@ func (s *Server) getContainersStats(eng *engine.Engine, version version.Version,
return fmt.Errorf("Missing parameter")
}
- return s.daemon.ContainerStats(vars["name"], utils.NewWriteFlusher(w))
+ return s.daemon.ContainerStats(vars["name"], boolValue(r, "stream"), ioutils.NewWriteFlusher(w))
}
-func (s *Server) getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getContainersLogs(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -628,13 +571,23 @@ func (s *Server) getContainersLogs(eng *engine.Engine, version version.Version,
return fmt.Errorf("Bad parameters: you must choose at least one stream")
}
+ var since time.Time
+ if r.Form.Get("since") != "" {
+ s, err := strconv.ParseInt(r.Form.Get("since"), 10, 64)
+ if err != nil {
+ return err
+ }
+ since = time.Unix(s, 0)
+ }
+
logsConfig := &daemon.ContainerLogsConfig{
Follow: boolValue(r, "follow"),
Timestamps: boolValue(r, "timestamps"),
+ Since: since,
Tail: r.Form.Get("tail"),
UseStdout: stdout,
UseStderr: stderr,
- OutStream: utils.NewWriteFlusher(w),
+ OutStream: ioutils.NewWriteFlusher(w),
}
if err := s.daemon.ContainerLogs(vars["name"], logsConfig); err != nil {
@@ -644,7 +597,7 @@ func (s *Server) getContainersLogs(eng *engine.Engine, version version.Version,
return nil
}
-func (s *Server) postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postImagesTag(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -655,14 +608,16 @@ func (s *Server) postImagesTag(eng *engine.Engine, version version.Version, w ht
repo := r.Form.Get("repo")
tag := r.Form.Get("tag")
force := boolValue(r, "force")
- if err := s.daemon.Repositories().Tag(repo, tag, vars["name"], force); err != nil {
+ name := vars["name"]
+ if err := s.daemon.Repositories().Tag(repo, tag, name, force); err != nil {
return err
}
+ s.daemon.EventsService.Log("tag", utils.ImageReference(repo, tag), "")
w.WriteHeader(http.StatusCreated)
return nil
}
-func (s *Server) postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postCommit(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -708,7 +663,7 @@ func (s *Server) postCommit(eng *engine.Engine, version version.Version, w http.
}
// Creates an image from Pull or from Import
-func (s *Server) postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postImagesCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -730,13 +685,11 @@ func (s *Server) postImagesCreate(eng *engine.Engine, version version.Version, w
}
var (
- opErr error
- useJSON = version.GreaterThan("1.0")
+ err error
+ output = ioutils.NewWriteFlusher(w)
)
- if useJSON {
- w.Header().Set("Content-Type", "application/json")
- }
+ w.Header().Set("Content-Type", "application/json")
if image != "" { //pull
if tag == "" {
@@ -750,14 +703,12 @@ func (s *Server) postImagesCreate(eng *engine.Engine, version version.Version, w
}
imagePullConfig := &graph.ImagePullConfig{
- Parallel: version.GreaterThan("1.3"),
MetaHeaders: metaHeaders,
AuthConfig: authConfig,
- OutStream: utils.NewWriteFlusher(w),
- Json: useJSON,
+ OutStream: output,
}
- opErr = s.daemon.Repositories().Pull(image, tag, imagePullConfig)
+ err = s.daemon.Repositories().Pull(image, tag, imagePullConfig)
} else { //import
if tag == "" {
repo, tag = parsers.ParseRepositoryTag(repo)
@@ -767,28 +718,34 @@ func (s *Server) postImagesCreate(eng *engine.Engine, version version.Version, w
imageImportConfig := &graph.ImageImportConfig{
Changes: r.Form["changes"],
InConfig: r.Body,
- OutStream: utils.NewWriteFlusher(w),
- Json: useJSON,
+ OutStream: output,
}
- newConfig, err := builder.BuildFromConfig(s.daemon, &runconfig.Config{}, imageImportConfig.Changes)
+ // 'err' MUST NOT be defined within this block, we need any error
+ // generated from the download to be available to the output
+ // stream processing below
+ var newConfig *runconfig.Config
+ newConfig, err = builder.BuildFromConfig(s.daemon, &runconfig.Config{}, imageImportConfig.Changes)
if err != nil {
return err
}
imageImportConfig.ContainerConfig = newConfig
- opErr = s.daemon.Repositories().Import(src, repo, tag, imageImportConfig)
+ err = s.daemon.Repositories().Import(src, repo, tag, imageImportConfig)
}
-
- if opErr != nil {
- sf := streamformatter.NewStreamFormatter(useJSON)
- return fmt.Errorf(string(sf.FormatError(opErr)))
+ if err != nil {
+ if !output.Flushed() {
+ return err
+ }
+ sf := streamformatter.NewJSONStreamFormatter()
+ output.Write(sf.FormatError(err))
}
return nil
+
}
-func (s *Server) getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getImagesSearch(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -818,7 +775,7 @@ func (s *Server) getImagesSearch(eng *engine.Engine, version version.Version, w
return json.NewEncoder(w).Encode(query.Results)
}
-func (s *Server) postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postImagesPush(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
@@ -849,33 +806,29 @@ func (s *Server) postImagesPush(eng *engine.Engine, version version.Version, w h
}
}
- useJSON := version.GreaterThan("1.0")
name := vars["name"]
-
- output := utils.NewWriteFlusher(w)
+ output := ioutils.NewWriteFlusher(w)
imagePushConfig := &graph.ImagePushConfig{
MetaHeaders: metaHeaders,
AuthConfig: authConfig,
Tag: r.Form.Get("tag"),
OutStream: output,
- Json: useJSON,
- }
- if useJSON {
- w.Header().Set("Content-Type", "application/json")
}
+ w.Header().Set("Content-Type", "application/json")
+
if err := s.daemon.Repositories().Push(name, imagePushConfig); err != nil {
if !output.Flushed() {
return err
}
- sf := streamformatter.NewStreamFormatter(useJSON)
+ sf := streamformatter.NewJSONStreamFormatter()
output.Write(sf.FormatError(err))
}
return nil
}
-func (s *Server) getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getImagesGet(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
@@ -883,12 +836,9 @@ func (s *Server) getImagesGet(eng *engine.Engine, version version.Version, w htt
return err
}
- useJSON := version.GreaterThan("1.0")
- if useJSON {
- w.Header().Set("Content-Type", "application/x-tar")
- }
+ w.Header().Set("Content-Type", "application/x-tar")
- output := utils.NewWriteFlusher(w)
+ output := ioutils.NewWriteFlusher(w)
imageExportConfig := &graph.ImageExportConfig{Outstream: output}
if name, ok := vars["name"]; ok {
imageExportConfig.Names = []string{name}
@@ -900,18 +850,18 @@ func (s *Server) getImagesGet(eng *engine.Engine, version version.Version, w htt
if !output.Flushed() {
return err
}
- sf := streamformatter.NewStreamFormatter(useJSON)
+ sf := streamformatter.NewJSONStreamFormatter()
output.Write(sf.FormatError(err))
}
return nil
}
-func (s *Server) postImagesLoad(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postImagesLoad(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return s.daemon.Repositories().Load(r.Body, w)
}
-func (s *Server) postContainersCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return nil
}
@@ -939,7 +889,7 @@ func (s *Server) postContainersCreate(eng *engine.Engine, version version.Versio
})
}
-func (s *Server) postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersRestart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -947,10 +897,7 @@ func (s *Server) postContainersRestart(eng *engine.Engine, version version.Versi
return fmt.Errorf("Missing parameter")
}
- timeout, err := strconv.Atoi(r.Form.Get("t"))
- if err != nil {
- return err
- }
+ timeout, _ := strconv.Atoi(r.Form.Get("t"))
if err := s.daemon.ContainerRestart(vars["name"], timeout); err != nil {
return err
@@ -961,7 +908,7 @@ func (s *Server) postContainersRestart(eng *engine.Engine, version version.Versi
return nil
}
-func (s *Server) postContainerRename(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainerRename(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -978,7 +925,7 @@ func (s *Server) postContainerRename(eng *engine.Engine, version version.Version
return nil
}
-func (s *Server) deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) deleteContainers(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -1006,7 +953,7 @@ func (s *Server) deleteContainers(eng *engine.Engine, version version.Version, w
return nil
}
-func (s *Server) deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) deleteImages(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -1026,7 +973,7 @@ func (s *Server) deleteImages(eng *engine.Engine, version version.Version, w htt
return writeJSON(w, http.StatusOK, list)
}
-func (s *Server) postContainersStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersStart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
@@ -1062,7 +1009,7 @@ func (s *Server) postContainersStart(eng *engine.Engine, version version.Version
return nil
}
-func (s *Server) postContainersStop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersStop(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -1070,10 +1017,7 @@ func (s *Server) postContainersStop(eng *engine.Engine, version version.Version,
return fmt.Errorf("Missing parameter")
}
- seconds, err := strconv.Atoi(r.Form.Get("t"))
- if err != nil {
- return err
- }
+ seconds, _ := strconv.Atoi(r.Form.Get("t"))
if err := s.daemon.ContainerStop(vars["name"], seconds); err != nil {
if err.Error() == "Container already stopped" {
@@ -1087,25 +1031,22 @@ func (s *Server) postContainersStop(eng *engine.Engine, version version.Version,
return nil
}
-func (s *Server) postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersWait(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
- name := vars["name"]
- cont, err := s.daemon.Get(name)
+ status, err := s.daemon.ContainerWait(vars["name"], -1*time.Second)
if err != nil {
return err
}
- status, _ := cont.WaitStop(-1 * time.Second)
-
return writeJSON(w, http.StatusOK, &types.ContainerWaitResponse{
StatusCode: status,
})
}
-func (s *Server) postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersResize(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -1122,15 +1063,10 @@ func (s *Server) postContainersResize(eng *engine.Engine, version version.Versio
return err
}
- cont, err := s.daemon.Get(vars["name"])
- if err != nil {
- return err
- }
-
- return cont.Resize(height, width)
+ return s.daemon.ContainerResize(vars["name"], height, width)
}
-func (s *Server) postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersAttach(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -1138,71 +1074,56 @@ func (s *Server) postContainersAttach(eng *engine.Engine, version version.Versio
return fmt.Errorf("Missing parameter")
}
- cont, err := s.daemon.Get(vars["name"])
- if err != nil {
- return err
- }
-
inStream, outStream, err := hijackServer(w)
if err != nil {
return err
}
defer closeStreams(inStream, outStream)
- var errStream io.Writer
-
if _, ok := r.Header["Upgrade"]; ok {
fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
} else {
fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
}
- if !cont.Config.Tty && version.GreaterThanOrEqualTo("1.6") {
- errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
- outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
- } else {
- errStream = outStream
- }
- logs := boolValue(r, "logs")
- stream := boolValue(r, "stream")
-
- var stdin io.ReadCloser
- var stdout, stderr io.Writer
-
- if boolValue(r, "stdin") {
- stdin = inStream
- }
- if boolValue(r, "stdout") {
- stdout = outStream
- }
- if boolValue(r, "stderr") {
- stderr = errStream
+ attachWithLogsConfig := &daemon.ContainerAttachWithLogsConfig{
+ InStream: inStream,
+ OutStream: outStream,
+ UseStdin: boolValue(r, "stdin"),
+ UseStdout: boolValue(r, "stdout"),
+ UseStderr: boolValue(r, "stderr"),
+ Logs: boolValue(r, "logs"),
+ Stream: boolValue(r, "stream"),
+ Multiplex: version.GreaterThanOrEqualTo("1.6"),
}
- if err := cont.AttachWithLogs(stdin, stdout, stderr, logs, stream); err != nil {
+ if err := s.daemon.ContainerAttachWithLogs(vars["name"], attachWithLogsConfig); err != nil {
fmt.Fprintf(outStream, "Error attaching: %s\n", err)
}
+
return nil
}
-func (s *Server) wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) wsContainersAttach(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
- cont, err := s.daemon.Get(vars["name"])
- if err != nil {
- return err
- }
h := websocket.Handler(func(ws *websocket.Conn) {
defer ws.Close()
- logs := r.Form.Get("logs") != ""
- stream := r.Form.Get("stream") != ""
- if err := cont.AttachWithLogs(ws, ws, ws, logs, stream); err != nil {
+ wsAttachWithLogsConfig := &daemon.ContainerWsAttachWithLogsConfig{
+ InStream: ws,
+ OutStream: ws,
+ ErrStream: ws,
+ Logs: boolValue(r, "logs"),
+ Stream: boolValue(r, "stream"),
+ }
+
+ if err := s.daemon.ContainerWsAttachWithLogs(vars["name"], wsAttachWithLogsConfig); err != nil {
logrus.Errorf("Error attaching websocket: %s", err)
}
})
@@ -1211,28 +1132,19 @@ func (s *Server) wsContainersAttach(eng *engine.Engine, version version.Version,
return nil
}
-func (s *Server) getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getContainersByName(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
- name := vars["name"]
-
- if version.LessThan("1.12") {
- containerJSONRaw, err := s.daemon.ContainerInspectRaw(name)
- if err != nil {
- return err
- }
- return writeJSON(w, http.StatusOK, containerJSONRaw)
- }
- containerJSON, err := s.daemon.ContainerInspect(name)
+ containerJSON, err := s.daemon.ContainerInspect(vars["name"])
if err != nil {
return err
}
return writeJSON(w, http.StatusOK, containerJSON)
}
-func (s *Server) getExecByID(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getExecByID(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter 'id'")
}
@@ -1245,22 +1157,12 @@ func (s *Server) getExecByID(eng *engine.Engine, version version.Version, w http
return writeJSON(w, http.StatusOK, eConfig)
}
-func (s *Server) getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getImagesByName(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
- name := vars["name"]
- if version.LessThan("1.12") {
- imageInspectRaw, err := s.daemon.Repositories().LookupRaw(name)
- if err != nil {
- return err
- }
-
- return writeJSON(w, http.StatusOK, imageInspectRaw)
- }
-
- imageInspect, err := s.daemon.Repositories().Lookup(name)
+ imageInspect, err := s.daemon.Repositories().Lookup(vars["name"])
if err != nil {
return err
}
@@ -1268,31 +1170,14 @@ func (s *Server) getImagesByName(eng *engine.Engine, version version.Version, w
return writeJSON(w, http.StatusOK, imageInspect)
}
-func (s *Server) postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
- if version.LessThan("1.3") {
- return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.")
- }
+func (s *Server) postBuild(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var (
- authEncoded = r.Header.Get("X-Registry-Auth")
authConfig = &cliconfig.AuthConfig{}
configFileEncoded = r.Header.Get("X-Registry-Config")
configFile = &cliconfig.ConfigFile{}
buildConfig = builder.NewBuildConfig()
)
- // This block can be removed when API versions prior to 1.9 are deprecated.
- // Both headers will be parsed and sent along to the daemon, but if a non-empty
- // ConfigFile is present, any value provided as an AuthConfig directly will
- // be overridden. See BuildFile::CmdFrom for details.
- if version.LessThan("1.9") && authEncoded != "" {
- authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
- if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
- // for a pull it is not an error if no auth was given
- // to increase compatibility with the existing api it is defaulting to be empty
- authConfig = &cliconfig.AuthConfig{}
- }
- }
-
if configFileEncoded != "" {
configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded))
if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil {
@@ -1302,10 +1187,7 @@ func (s *Server) postBuild(eng *engine.Engine, version version.Version, w http.R
}
}
- if version.GreaterThanOrEqualTo("1.8") {
- w.Header().Set("Content-Type", "application/json")
- buildConfig.JSONFormat = true
- }
+ w.Header().Set("Content-Type", "application/json")
if boolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") {
buildConfig.Remove = true
@@ -1318,7 +1200,7 @@ func (s *Server) postBuild(eng *engine.Engine, version version.Version, w http.R
buildConfig.Pull = true
}
- output := utils.NewWriteFlusher(w)
+ output := ioutils.NewWriteFlusher(w)
buildConfig.Stdout = output
buildConfig.Context = r.Body
@@ -1330,12 +1212,14 @@ func (s *Server) postBuild(eng *engine.Engine, version version.Version, w http.R
buildConfig.ForceRemove = boolValue(r, "forcerm")
buildConfig.AuthConfig = authConfig
buildConfig.ConfigFile = configFile
- buildConfig.MemorySwap = int64Value(r, "memswap")
- buildConfig.Memory = int64Value(r, "memory")
- buildConfig.CpuShares = int64Value(r, "cpushares")
- buildConfig.CpuQuota = int64Value(r, "cpuquota")
+ buildConfig.MemorySwap = int64ValueOrZero(r, "memswap")
+ buildConfig.Memory = int64ValueOrZero(r, "memory")
+ buildConfig.CpuShares = int64ValueOrZero(r, "cpushares")
+ buildConfig.CpuPeriod = int64ValueOrZero(r, "cpuperiod")
+ buildConfig.CpuQuota = int64ValueOrZero(r, "cpuquota")
buildConfig.CpuSetCpus = r.FormValue("cpusetcpus")
buildConfig.CpuSetMems = r.FormValue("cpusetmems")
+ buildConfig.CgroupParent = r.FormValue("cgroupparent")
// Job cancellation. Note: not all job types support this.
if closeNotifier, ok := w.(http.CloseNotifier); ok {
@@ -1357,13 +1241,13 @@ func (s *Server) postBuild(eng *engine.Engine, version version.Version, w http.R
if !output.Flushed() {
return err
}
- sf := streamformatter.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8"))
+ sf := streamformatter.NewJSONStreamFormatter()
w.Write(sf.FormatError(err))
}
return nil
}
-func (s *Server) postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersCopy(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if vars == nil {
return fmt.Errorf("Missing parameter")
}
@@ -1381,30 +1265,19 @@ func (s *Server) postContainersCopy(eng *engine.Engine, version version.Version,
return fmt.Errorf("Path cannot be empty")
}
- res := cfg.Resource
-
- if res[0] == '/' {
- res = res[1:]
- }
-
- cont, err := s.daemon.Get(vars["name"])
+ data, err := s.daemon.ContainerCopy(vars["name"], cfg.Resource)
if err != nil {
- logrus.Errorf("%v", err)
if strings.Contains(strings.ToLower(err.Error()), "no such id") {
w.WriteHeader(http.StatusNotFound)
return nil
}
- }
-
- data, err := cont.Copy(res)
- if err != nil {
- logrus.Errorf("%v", err)
if os.IsNotExist(err) {
return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"])
}
return err
}
defer data.Close()
+
w.Header().Set("Content-Type", "application/x-tar")
if _, err := io.Copy(w, data); err != nil {
return err
@@ -1413,7 +1286,7 @@ func (s *Server) postContainersCopy(eng *engine.Engine, version version.Version,
return nil
}
-func (s *Server) postContainerExecCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainerExecCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return nil
}
@@ -1442,7 +1315,7 @@ func (s *Server) postContainerExecCreate(eng *engine.Engine, version version.Ver
}
// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start.
-func (s *Server) postContainerExecStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainerExecStart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return nil
}
@@ -1474,12 +1347,11 @@ func (s *Server) postContainerExecStart(eng *engine.Engine, version version.Vers
fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
}
- if !execStartCheck.Tty && version.GreaterThanOrEqualTo("1.6") {
+ if !execStartCheck.Tty {
errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
- } else {
- errStream = outStream
}
+
stdin = inStream
stdout = outStream
stderr = errStream
@@ -1495,7 +1367,7 @@ func (s *Server) postContainerExecStart(eng *engine.Engine, version version.Vers
return nil
}
-func (s *Server) postContainerExecResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainerExecResize(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
@@ -1515,7 +1387,7 @@ func (s *Server) postContainerExecResize(eng *engine.Engine, version version.Ver
return s.daemon.ContainerExecResize(vars["name"], height, width)
}
-func (s *Server) optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) optionsHandler(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.WriteHeader(http.StatusOK)
return nil
}
@@ -1526,12 +1398,32 @@ func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string
w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS")
}
-func (s *Server) ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) ping(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
_, err := w.Write([]byte{'O', 'K'})
return err
}
-func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc {
+func (s *Server) initTcpSocket(addr string) (l net.Listener, err error) {
+ if !s.cfg.TlsVerify {
+ logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
+ }
+
+ var c *sockets.TlsConfig
+ if s.cfg.Tls || s.cfg.TlsVerify {
+ c = sockets.NewTlsConfig(s.cfg.TlsCert, s.cfg.TlsKey, s.cfg.TlsCa, s.cfg.TlsVerify)
+ }
+
+ if l, err = sockets.NewTcpSocket(addr, c, s.start); err != nil {
+ return nil, err
+ }
+ if err := allocateDaemonPort(addr); err != nil {
+ return nil, err
+ }
+
+ return
+}
+
+func makeHttpHandler(logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// log the request
logrus.Debugf("Calling %s %s", localMethod, localRoute)
@@ -1555,11 +1447,11 @@ func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, local
}
if version.GreaterThan(api.APIVERSION) {
- http.Error(w, fmt.Errorf("client and server don't have same version (client API version: %s, server API version: %s)", version, api.APIVERSION).Error(), http.StatusNotFound)
+ http.Error(w, fmt.Errorf("client and server don't have same version (client API version: %s, server API version: %s)", version, api.APIVERSION).Error(), http.StatusBadRequest)
return
}
- if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
+ if err := handlerFunc(version, w, r, mux.Vars(r)); err != nil {
logrus.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
httpError(w, err)
}
@@ -1567,7 +1459,7 @@ func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, local
}
// we keep enableCors just for legacy usage, need to be removed in the future
-func createRouter(s *Server, eng *engine.Engine) *mux.Router {
+func createRouter(s *Server) *mux.Router {
r := mux.NewRouter()
if os.Getenv("DEBUG") != "" {
ProfilerSetup(r, "/debug/")
@@ -1644,7 +1536,7 @@ func createRouter(s *Server, eng *engine.Engine) *mux.Router {
localMethod := method
// build the handler function
- f := makeHttpHandler(eng, s.cfg.Logging, localMethod, localRoute, localFct, corsHeaders, version.Version(s.cfg.Version))
+ f := makeHttpHandler(s.cfg.Logging, localMethod, localRoute, localFct, corsHeaders, version.Version(s.cfg.Version))
// add the new route
if localRoute == "" {
@@ -1659,23 +1551,6 @@ func createRouter(s *Server, eng *engine.Engine) *mux.Router {
return r
}
-// ServeRequest processes a single http request to the docker remote api.
-// FIXME: refactor this to be part of Server and not require re-creating a new
-// router each time. This requires first moving ListenAndServe into Server.
-func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) {
- cfg := &ServerConfig{
- EnableCors: true,
- Version: string(apiversion),
- }
- api := New(cfg, eng)
- daemon, _ := eng.HackGetGlobalVar("httpapi.daemon").(*daemon.Daemon)
- api.AcceptConnections(daemon)
- router := createRouter(api, eng)
- // Insert APIVERSION into the request as a convenience
- req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path)
- router.ServeHTTP(w, req)
-}
-
func allocateDaemonPort(addr string) error {
host, port, err := net.SplitHostPort(addr)
if err != nil {
@@ -1694,8 +1569,9 @@ func allocateDaemonPort(addr string) error {
return fmt.Errorf("failed to lookup %s address in host specification", host)
}
+ pa := portallocator.Get()
for _, hostIP := range hostIPs {
- if _, err := bridge.RequestPort(hostIP, "tcp", intPort); err != nil {
+ if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil {
return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err)
}
}
diff --git a/api/server/server_linux.go b/api/server/server_linux.go
index 43f0eefe0e..a0cfee1f96 100644
--- a/api/server/server_linux.go
+++ b/api/server/server_linux.go
@@ -7,8 +7,8 @@ import (
"net"
"net/http"
- "github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon"
+ "github.com/docker/docker/pkg/sockets"
"github.com/docker/docker/pkg/systemd"
)
@@ -45,17 +45,12 @@ func (s *Server) newServer(proto, addr string) (serverCloser, error) {
}
return nil, nil
case "tcp":
- if !s.cfg.TlsVerify {
- logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
- }
- if l, err = NewTcpSocket(addr, tlsConfigFromServerConfig(s.cfg), s.start); err != nil {
- return nil, err
- }
- if err := allocateDaemonPort(addr); err != nil {
+ l, err = s.initTcpSocket(addr)
+ if err != nil {
return nil, err
}
case "unix":
- if l, err = NewUnixSocket(addr, s.cfg.SocketGroup, s.start); err != nil {
+ if l, err = sockets.NewUnixSocket(addr, s.cfg.SocketGroup, s.start); err != nil {
return nil, err
}
default:
diff --git a/api/server/server_windows.go b/api/server/server_windows.go
index c121bbd3e8..9fa5ab64a1 100644
--- a/api/server/server_windows.go
+++ b/api/server/server_windows.go
@@ -7,7 +7,6 @@ import (
"net"
"net/http"
- "github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon"
)
@@ -19,13 +18,8 @@ func (s *Server) newServer(proto, addr string) (Server, error) {
)
switch proto {
case "tcp":
- if !s.cfg.TlsVerify {
- logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
- }
- if l, err = NewTcpSocket(addr, tlsConfigFromServerConfig(s.cfg)); err != nil {
- return nil, err
- }
- if err := allocateDaemonPort(addr); err != nil {
+ l, err = s.initTcpSocket(addr)
+ if err != nil {
return nil, err
}
default:
diff --git a/api/types/types.go b/api/types/types.go
index 7c31065460..457808f5e9 100644
--- a/api/types/types.go
+++ b/api/types/types.go
@@ -92,15 +92,6 @@ type ImageInspect struct {
VirtualSize int64
}
-type LegacyImage struct {
- ID string `json:"Id"`
- Repository string
- Tag string
- Created int
- Size int
- VirtualSize int
-}
-
// GET "/containers/json"
type Port struct {
IP string
@@ -152,10 +143,12 @@ type Info struct {
DriverStatus [][2]string
MemoryLimit bool
SwapLimit bool
+ CpuCfsPeriod bool
CpuCfsQuota bool
IPv4Forwarding bool
Debug bool
NFd int
+ OomKillDisable bool
NGoroutines int
SystemTime string
ExecutionDriver string
@@ -175,6 +168,7 @@ type Info struct {
NoProxy string
Name string
Labels []string
+ ExperimentalBuild bool
}
// This struct is a temp struct used by execStart
diff --git a/builder/bflag.go b/builder/bflag.go
new file mode 100644
index 0000000000..a6a2ba3a6f
--- /dev/null
+++ b/builder/bflag.go
@@ -0,0 +1,155 @@
+package builder
+
+import (
+ "fmt"
+ "strings"
+)
+
+type FlagType int
+
+const (
+ boolType FlagType = iota
+ stringType
+)
+
+type BuilderFlags struct {
+ Args []string // actual flags/args from cmd line
+ flags map[string]*Flag
+ used map[string]*Flag
+ Err error
+}
+
+type Flag struct {
+ bf *BuilderFlags
+ name string
+ flagType FlagType
+ Value string
+}
+
+func NewBuilderFlags() *BuilderFlags {
+ return &BuilderFlags{
+ flags: make(map[string]*Flag),
+ used: make(map[string]*Flag),
+ }
+}
+
+func (bf *BuilderFlags) AddBool(name string, def bool) *Flag {
+ flag := bf.addFlag(name, boolType)
+ if flag == nil {
+ return nil
+ }
+ if def {
+ flag.Value = "true"
+ } else {
+ flag.Value = "false"
+ }
+ return flag
+}
+
+func (bf *BuilderFlags) AddString(name string, def string) *Flag {
+ flag := bf.addFlag(name, stringType)
+ if flag == nil {
+ return nil
+ }
+ flag.Value = def
+ return flag
+}
+
+func (bf *BuilderFlags) addFlag(name string, flagType FlagType) *Flag {
+ if _, ok := bf.flags[name]; ok {
+ bf.Err = fmt.Errorf("Duplicate flag defined: %s", name)
+ return nil
+ }
+
+ newFlag := &Flag{
+ bf: bf,
+ name: name,
+ flagType: flagType,
+ }
+ bf.flags[name] = newFlag
+
+ return newFlag
+}
+
+func (fl *Flag) IsUsed() bool {
+ if _, ok := fl.bf.used[fl.name]; ok {
+ return true
+ }
+ return false
+}
+
+func (fl *Flag) IsTrue() bool {
+ if fl.flagType != boolType {
+ // Should never get here
+ panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name))
+ }
+ return fl.Value == "true"
+}
+
+func (bf *BuilderFlags) Parse() error {
+ // If there was an error while defining the possible flags
+ // go ahead and bubble it back up here since we didn't do it
+ // earlier in the processing
+ if bf.Err != nil {
+ return fmt.Errorf("Error setting up flags: %s", bf.Err)
+ }
+
+ for _, arg := range bf.Args {
+ if !strings.HasPrefix(arg, "--") {
+ return fmt.Errorf("Arg should start with -- : %s", arg)
+ }
+
+ if arg == "--" {
+ return nil
+ }
+
+ arg = arg[2:]
+ value := ""
+
+ index := strings.Index(arg, "=")
+ if index >= 0 {
+ value = arg[index+1:]
+ arg = arg[:index]
+ }
+
+ flag, ok := bf.flags[arg]
+ if !ok {
+ return fmt.Errorf("Unknown flag: %s", arg)
+ }
+
+ if _, ok = bf.used[arg]; ok {
+ return fmt.Errorf("Duplicate flag specified: %s", arg)
+ }
+
+ bf.used[arg] = flag
+
+ switch flag.flagType {
+ case boolType:
+ // value == "" is only ok if no "=" was specified
+ if index >= 0 && value == "" {
+ return fmt.Errorf("Missing a value on flag: %s", arg)
+ }
+
+ lower := strings.ToLower(value)
+ if lower == "" {
+ flag.Value = "true"
+ } else if lower == "true" || lower == "false" {
+ flag.Value = lower
+ } else {
+ return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value)
+ }
+
+ case stringType:
+ if index < 0 {
+ return fmt.Errorf("Missing a value on flag: %s", arg)
+ }
+ flag.Value = value
+
+ default:
+ panic(fmt.Errorf("No idea what kind of flag we have! Should never get here!"))
+ }
+
+ }
+
+ return nil
+}
diff --git a/builder/bflag_test.go b/builder/bflag_test.go
new file mode 100644
index 0000000000..d03a1c3065
--- /dev/null
+++ b/builder/bflag_test.go
@@ -0,0 +1,187 @@
+package builder
+
+import (
+ "testing"
+)
+
+func TestBuilderFlags(t *testing.T) {
+ var expected string
+ var err error
+
+ // ---
+
+ bf := NewBuilderFlags()
+ bf.Args = []string{}
+ if err := bf.Parse(); err != nil {
+ t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err)
+ }
+
+ // ---
+
+ bf = NewBuilderFlags()
+ bf.Args = []string{"--"}
+ if err := bf.Parse(); err != nil {
+ t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err)
+ }
+
+ // ---
+
+ bf = NewBuilderFlags()
+ flStr1 := bf.AddString("str1", "")
+ flBool1 := bf.AddBool("bool1", false)
+ bf.Args = []string{}
+ if err = bf.Parse(); err != nil {
+ t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err)
+ }
+
+ if flStr1.IsUsed() == true {
+ t.Fatalf("Test3 - str1 was not used!")
+ }
+ if flBool1.IsUsed() == true {
+ t.Fatalf("Test3 - bool1 was not used!")
+ }
+
+ // ---
+
+ bf = NewBuilderFlags()
+ flStr1 = bf.AddString("str1", "HI")
+ flBool1 = bf.AddBool("bool1", false)
+ bf.Args = []string{}
+
+ if err = bf.Parse(); err != nil {
+ t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err)
+ }
+
+ if flStr1.Value != "HI" {
+ t.Fatalf("Str1 was supposed to default to: HI")
+ }
+ if flBool1.IsTrue() {
+ t.Fatalf("Bool1 was supposed to default to: false")
+ }
+ if flStr1.IsUsed() == true {
+ t.Fatalf("Str1 was not used!")
+ }
+ if flBool1.IsUsed() == true {
+ t.Fatalf("Bool1 was not used!")
+ }
+
+ // ---
+
+ bf = NewBuilderFlags()
+ flStr1 = bf.AddString("str1", "HI")
+ bf.Args = []string{"--str1"}
+
+ if err = bf.Parse(); err == nil {
+ t.Fatalf("Test %q was supposed to fail", bf.Args)
+ }
+
+ // ---
+
+ bf = NewBuilderFlags()
+ flStr1 = bf.AddString("str1", "HI")
+ bf.Args = []string{"--str1="}
+
+ if err = bf.Parse(); err != nil {
+ t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+ }
+
+ expected = ""
+ if flStr1.Value != expected {
+ t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
+ }
+
+ // ---
+
+ bf = NewBuilderFlags()
+ flStr1 = bf.AddString("str1", "HI")
+ bf.Args = []string{"--str1=BYE"}
+
+ if err = bf.Parse(); err != nil {
+ t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+ }
+
+ expected = "BYE"
+ if flStr1.Value != expected {
+ t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
+ }
+
+ // ---
+
+ bf = NewBuilderFlags()
+ flBool1 = bf.AddBool("bool1", false)
+ bf.Args = []string{"--bool1"}
+
+ if err = bf.Parse(); err != nil {
+ t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+ }
+
+ if !flBool1.IsTrue() {
+ t.Fatalf("Test-b1 Bool1 was supposed to be true")
+ }
+
+ // ---
+
+ bf = NewBuilderFlags()
+ flBool1 = bf.AddBool("bool1", false)
+ bf.Args = []string{"--bool1=true"}
+
+ if err = bf.Parse(); err != nil {
+ t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+ }
+
+ if !flBool1.IsTrue() {
+ t.Fatalf("Test-b2 Bool1 was supposed to be true")
+ }
+
+ // ---
+
+ bf = NewBuilderFlags()
+ flBool1 = bf.AddBool("bool1", false)
+ bf.Args = []string{"--bool1=false"}
+
+ if err = bf.Parse(); err != nil {
+ t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+ }
+
+ if flBool1.IsTrue() {
+ t.Fatalf("Test-b3 Bool1 was supposed to be false")
+ }
+
+ // ---
+
+ bf = NewBuilderFlags()
+ flBool1 = bf.AddBool("bool1", false)
+ bf.Args = []string{"--bool1=false1"}
+
+ if err = bf.Parse(); err == nil {
+ t.Fatalf("Test %q was supposed to fail", bf.Args)
+ }
+
+ // ---
+
+ bf = NewBuilderFlags()
+ flBool1 = bf.AddBool("bool1", false)
+ bf.Args = []string{"--bool2"}
+
+ if err = bf.Parse(); err == nil {
+ t.Fatalf("Test %q was supposed to fail", bf.Args)
+ }
+
+ // ---
+
+ bf = NewBuilderFlags()
+ flStr1 = bf.AddString("str1", "HI")
+ flBool1 = bf.AddBool("bool1", false)
+ bf.Args = []string{"--bool1", "--str1=BYE"}
+
+ if err = bf.Parse(); err != nil {
+ t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+ }
+
+ if flStr1.Value != "BYE" {
+ t.Fatalf("Test %s, str1 should be BYE", bf.Args)
+ }
+ if !flBool1.IsTrue() {
+ t.Fatalf("Test %s, bool1 should be true", bf.Args)
+ }
+}
diff --git a/builder/command/command.go b/builder/command/command.go
index 16544f0267..8e5d980321 100644
--- a/builder/command/command.go
+++ b/builder/command/command.go
@@ -16,7 +16,6 @@ const (
Expose = "expose"
Volume = "volume"
User = "user"
- Insert = "insert"
)
// Commands is list of all Dockerfile commands
@@ -35,5 +34,4 @@ var Commands = map[string]struct{}{
Expose: {},
Volume: {},
User: {},
- Insert: {},
}
diff --git a/builder/dispatchers.go b/builder/dispatchers.go
index e807f1aee1..6d0a30c0c3 100644
--- a/builder/dispatchers.go
+++ b/builder/dispatchers.go
@@ -12,6 +12,7 @@ import (
"io/ioutil"
"path/filepath"
"regexp"
+ "runtime"
"sort"
"strings"
@@ -38,6 +39,9 @@ func nullDispatch(b *Builder, args []string, attributes map[string]bool, origina
// in the dockerfile available from the next statement on via ${foo}.
//
func env(b *Builder, args []string, attributes map[string]bool, original string) error {
+ if runtime.GOOS == "windows" {
+ return fmt.Errorf("ENV is not supported on Windows.")
+ }
if len(args) == 0 {
return fmt.Errorf("ENV requires at least one argument")
}
@@ -47,6 +51,26 @@ func env(b *Builder, args []string, attributes map[string]bool, original string)
return fmt.Errorf("Bad input to ENV, too many args")
}
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
+ // TODO/FIXME/NOT USED
+ // Just here to show how to use the builder flags stuff within the
+ // context of a builder command. Will remove once we actually add
+ // a builder command to something!
+ /*
+ flBool1 := b.BuilderFlags.AddBool("bool1", false)
+ flStr1 := b.BuilderFlags.AddString("str1", "HI")
+
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
+ fmt.Printf("Bool1:%v\n", flBool1)
+ fmt.Printf("Str1:%v\n", flStr1)
+ */
+
commitStr := "ENV"
for j := 0; j < len(args); j++ {
@@ -81,6 +105,10 @@ func maintainer(b *Builder, args []string, attributes map[string]bool, original
return fmt.Errorf("MAINTAINER requires exactly one argument")
}
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
b.maintainer = args[0]
return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
}
@@ -98,6 +126,10 @@ func label(b *Builder, args []string, attributes map[string]bool, original strin
return fmt.Errorf("Bad input to LABEL, too many args")
}
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
commitStr := "LABEL"
if b.Config.Labels == nil {
@@ -126,6 +158,10 @@ func add(b *Builder, args []string, attributes map[string]bool, original string)
return fmt.Errorf("ADD requires at least two arguments")
}
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
return b.runContextCommand(args, true, true, "ADD")
}
@@ -138,6 +174,10 @@ func dispatchCopy(b *Builder, args []string, attributes map[string]bool, origina
return fmt.Errorf("COPY requires at least two arguments")
}
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
return b.runContextCommand(args, false, false, "COPY")
}
@@ -150,6 +190,10 @@ func from(b *Builder, args []string, attributes map[string]bool, original string
return fmt.Errorf("FROM requires one argument")
}
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
name := args[0]
if name == NoBaseImageSpecifier {
@@ -194,6 +238,10 @@ func onbuild(b *Builder, args []string, attributes map[string]bool, original str
return fmt.Errorf("ONBUILD requires at least one argument")
}
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
switch triggerInstruction {
case "ONBUILD":
@@ -217,6 +265,10 @@ func workdir(b *Builder, args []string, attributes map[string]bool, original str
return fmt.Errorf("WORKDIR requires exactly one argument")
}
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
workdir := args[0]
if !filepath.IsAbs(workdir) {
@@ -231,10 +283,11 @@ func workdir(b *Builder, args []string, attributes map[string]bool, original str
// RUN some command yo
//
// run a command and commit the image. Args are automatically prepended with
-// 'sh -c' in the event there is only one argument. The difference in
-// processing:
+// 'sh -c' under linux or 'cmd /S /C' under Windows, in the event there is
+// only one argument. The difference in processing:
//
-// RUN echo hi # sh -c echo hi
+// RUN echo hi # sh -c echo hi (Linux)
+// RUN echo hi # cmd /S /C echo hi (Windows)
// RUN [ "echo", "hi" ] # echo hi
//
func run(b *Builder, args []string, attributes map[string]bool, original string) error {
@@ -242,10 +295,18 @@ func run(b *Builder, args []string, attributes map[string]bool, original string)
return fmt.Errorf("Please provide a source image with `from` prior to run")
}
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
args = handleJsonArgs(args, attributes)
if !attributes["json"] {
- args = append([]string{"/bin/sh", "-c"}, args...)
+ if runtime.GOOS != "windows" {
+ args = append([]string{"/bin/sh", "-c"}, args...)
+ } else {
+ args = append([]string{"cmd", "/S /C"}, args...)
+ }
}
runCmd := flag.NewFlagSet("run", flag.ContinueOnError)
@@ -301,10 +362,18 @@ func run(b *Builder, args []string, attributes map[string]bool, original string)
// Argument handling is the same as RUN.
//
func cmd(b *Builder, args []string, attributes map[string]bool, original string) error {
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
cmdSlice := handleJsonArgs(args, attributes)
if !attributes["json"] {
- cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...)
+ if runtime.GOOS != "windows" {
+ cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...)
+ } else {
+ cmdSlice = append([]string{"cmd", "/S /C"}, cmdSlice...)
+ }
}
b.Config.Cmd = runconfig.NewCommand(cmdSlice...)
@@ -322,13 +391,17 @@ func cmd(b *Builder, args []string, attributes map[string]bool, original string)
// ENTRYPOINT /usr/sbin/nginx
//
-// Set the entrypoint (which defaults to sh -c) to /usr/sbin/nginx. Will
-// accept the CMD as the arguments to /usr/sbin/nginx.
+// Set the entrypoint (which defaults to sh -c on linux, or cmd /S /C on Windows) to
+// /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx.
//
// Handles command processing similar to CMD and RUN, only b.Config.Entrypoint
// is initialized at NewBuilder time instead of through argument parsing.
//
func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error {
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
parsed := handleJsonArgs(args, attributes)
switch {
@@ -340,7 +413,11 @@ func entrypoint(b *Builder, args []string, attributes map[string]bool, original
b.Config.Entrypoint = nil
default:
// ENTRYPOINT echo hi
- b.Config.Entrypoint = runconfig.NewEntrypoint("/bin/sh", "-c", parsed[0])
+ if runtime.GOOS != "windows" {
+ b.Config.Entrypoint = runconfig.NewEntrypoint("/bin/sh", "-c", parsed[0])
+ } else {
+ b.Config.Entrypoint = runconfig.NewEntrypoint("cmd", "/S /C", parsed[0])
+ }
}
// when setting the entrypoint if a CMD was not explicitly set then
@@ -368,6 +445,10 @@ func expose(b *Builder, args []string, attributes map[string]bool, original stri
return fmt.Errorf("EXPOSE requires at least one argument")
}
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
if b.Config.ExposedPorts == nil {
b.Config.ExposedPorts = make(nat.PortSet)
}
@@ -408,10 +489,18 @@ func expose(b *Builder, args []string, attributes map[string]bool, original stri
// ENTRYPOINT/CMD at container run time.
//
func user(b *Builder, args []string, attributes map[string]bool, original string) error {
+ if runtime.GOOS == "windows" {
+ return fmt.Errorf("USER is not supported on Windows.")
+ }
+
if len(args) != 1 {
return fmt.Errorf("USER requires exactly one argument")
}
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
b.Config.User = args[0]
return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
}
@@ -421,10 +510,17 @@ func user(b *Builder, args []string, attributes map[string]bool, original string
// Expose the volume /foo for use. Will also accept the JSON array form.
//
func volume(b *Builder, args []string, attributes map[string]bool, original string) error {
+ if runtime.GOOS == "windows" {
+ return fmt.Errorf("VOLUME is not supported on Windows.")
+ }
if len(args) == 0 {
return fmt.Errorf("VOLUME requires at least one argument")
}
+ if err := b.BuilderFlags.Parse(); err != nil {
+ return err
+ }
+
if b.Config.Volumes == nil {
b.Config.Volumes = map[string]struct{}{}
}
@@ -440,8 +536,3 @@ func volume(b *Builder, args []string, attributes map[string]bool, original stri
}
return nil
}
-
-// INSERT is no longer accepted, but we still parse it.
-func insert(b *Builder, args []string, attributes map[string]bool, original string) error {
- return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
-}
diff --git a/builder/evaluator.go b/builder/evaluator.go
index 9a2b57a8f9..62f86bb889 100644
--- a/builder/evaluator.go
+++ b/builder/evaluator.go
@@ -71,7 +71,6 @@ func init() {
command.Expose: expose,
command.Volume: volume,
command.User: user,
- command.Insert: insert,
}
}
@@ -116,17 +115,20 @@ type Builder struct {
image string // image name for commit processing
maintainer string // maintainer name. could probably be removed.
cmdSet bool // indicates is CMD was set in current Dockerfile
+ BuilderFlags *BuilderFlags // current cmd's BuilderFlags - temporary
context tarsum.TarSum // the context is a tarball that is uploaded by the client
contextPath string // the path of the temporary directory the local context is unpacked to (server side)
noBaseImage bool // indicates that this build does not start from any base image, but is being built from an empty file system.
// Set resource restrictions for build containers
- cpuSetCpus string
- cpuSetMems string
- cpuShares int64
- cpuQuota int64
- memory int64
- memorySwap int64
+ cpuSetCpus string
+ cpuSetMems string
+ cpuShares int64
+ cpuPeriod int64
+ cpuQuota int64
+ cgroupParent string
+ memory int64
+ memorySwap int64
cancelled <-chan struct{} // When closed, job was cancelled.
}
@@ -276,9 +278,14 @@ func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
cmd := ast.Value
attrs := ast.Attributes
original := ast.Original
+ flags := ast.Flags
strs := []string{}
msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd))
+ if len(ast.Flags) > 0 {
+ msg += " " + strings.Join(ast.Flags, " ")
+ }
+
if cmd == "onbuild" {
if ast.Next == nil {
return fmt.Errorf("ONBUILD requires at least one argument")
@@ -286,6 +293,11 @@ func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
ast = ast.Next.Children[0]
strs = append(strs, ast.Value)
msg += " " + ast.Value
+
+ if len(ast.Flags) > 0 {
+ msg += " " + strings.Join(ast.Flags, " ")
+ }
+
}
// count the number of nodes that we are going to traverse first
@@ -325,6 +337,8 @@ func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
// XXX yes, we skip any cmds that are not valid; the parser should have
// picked these out already.
if f, ok := evaluateTable[cmd]; ok {
+ b.BuilderFlags = NewBuilderFlags()
+ b.BuilderFlags.Args = flags
return f(b, strList, attrs, original)
}
diff --git a/builder/internals.go b/builder/internals.go
index adeadd87c8..9e58bb251f 100644
--- a/builder/internals.go
+++ b/builder/internals.go
@@ -155,6 +155,7 @@ func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecomp
dest,
allowRemote,
allowDecompression,
+ true,
); err != nil {
return err
}
@@ -225,7 +226,7 @@ func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecomp
return nil
}
-func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
+func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {
if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
origPath = origPath[1:]
@@ -350,7 +351,7 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri
}
// Deal with wildcards
- if ContainsWildcards(origPath) {
+ if allowWildcards && ContainsWildcards(origPath) {
for _, fileInfo := range b.context.GetSums() {
if fileInfo.Name() == "" {
continue
@@ -360,7 +361,9 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri
continue
}
- calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression)
+ // Note we set allowWildcards to false in case the name has
+ // a * in it
+ calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false)
}
return nil
}
@@ -455,10 +458,8 @@ func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
}
imagePullConfig := &graph.ImagePullConfig{
- Parallel: true,
AuthConfig: pullRegistryAuth,
OutStream: ioutils.NopWriteCloser(b.OutOld),
- Json: b.StreamFormatter.Json(),
}
if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
@@ -552,12 +553,15 @@ func (b *Builder) create() (*daemon.Container, error) {
b.Config.Image = b.image
hostConfig := &runconfig.HostConfig{
- CpuShares: b.cpuShares,
- CpuQuota: b.cpuQuota,
- CpusetCpus: b.cpuSetCpus,
- CpusetMems: b.cpuSetMems,
- Memory: b.memory,
- MemorySwap: b.memorySwap,
+ CpuShares: b.cpuShares,
+ CpuPeriod: b.cpuPeriod,
+ CpuQuota: b.cpuQuota,
+ CpusetCpus: b.cpuSetCpus,
+ CpusetMems: b.cpuSetMems,
+ CgroupParent: b.cgroupParent,
+ Memory: b.memory,
+ MemorySwap: b.memorySwap,
+ NetworkMode: "bridge",
}
config := *b.Config
@@ -618,7 +622,7 @@ func (b *Builder) run(c *daemon.Container) error {
// Wait for it to finish
if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
return &jsonmessage.JSONError{
- Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
+ Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.Config.Cmd.ToString(), ret),
Code: ret,
}
}
@@ -760,16 +764,14 @@ func fixPermissions(source, destination string, uid, gid int, destExisted bool)
func (b *Builder) clearTmp() {
for c := range b.TmpContainers {
- tmp, err := b.Daemon.Get(c)
- if err != nil {
- fmt.Fprint(b.OutStream, err.Error())
+ rmConfig := &daemon.ContainerRmConfig{
+ ForceRemove: true,
+ RemoveVolume: true,
}
-
- if err := b.Daemon.Rm(tmp); err != nil {
+ if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
return
}
- b.Daemon.DeleteVolumes(tmp.VolumePaths())
delete(b.TmpContainers, c)
fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
}
diff --git a/builder/job.go b/builder/job.go
index 0ad488aae8..c081dbe9f8 100644
--- a/builder/job.go
+++ b/builder/job.go
@@ -13,7 +13,7 @@ import (
"github.com/docker/docker/builder/parser"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/daemon"
- "github.com/docker/docker/graph"
+ "github.com/docker/docker/graph/tags"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/httputils"
"github.com/docker/docker/pkg/parsers"
@@ -45,13 +45,14 @@ type Config struct {
Remove bool
ForceRemove bool
Pull bool
- JSONFormat bool
Memory int64
MemorySwap int64
CpuShares int64
+ CpuPeriod int64
CpuQuota int64
CpuSetCpus string
CpuSetMems string
+ CgroupParent string
AuthConfig *cliconfig.AuthConfig
ConfigFile *cliconfig.ConfigFile
@@ -97,7 +98,7 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
return err
}
if len(tag) > 0 {
- if err := graph.ValidateTagName(tag); err != nil {
+ if err := tags.ValidateTagName(tag); err != nil {
return err
}
}
@@ -140,7 +141,7 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
}
defer context.Close()
- sf := streamformatter.NewStreamFormatter(buildConfig.JSONFormat)
+ sf := streamformatter.NewJSONStreamFormatter()
builder := &Builder{
Daemon: d,
@@ -163,9 +164,11 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
ConfigFile: buildConfig.ConfigFile,
dockerfileName: buildConfig.DockerfileName,
cpuShares: buildConfig.CpuShares,
+ cpuPeriod: buildConfig.CpuPeriod,
cpuQuota: buildConfig.CpuQuota,
cpuSetCpus: buildConfig.CpuSetCpus,
cpuSetMems: buildConfig.CpuSetMems,
+ cgroupParent: buildConfig.CgroupParent,
memory: buildConfig.Memory,
memorySwap: buildConfig.MemorySwap,
cancelled: buildConfig.WaitCancelled(),
diff --git a/builder/parser/parser.go b/builder/parser/parser.go
index 1ab151b30d..2260cd5270 100644
--- a/builder/parser/parser.go
+++ b/builder/parser/parser.go
@@ -29,6 +29,7 @@ type Node struct {
Children []*Node // the children of this sexp
Attributes map[string]bool // special attributes for this node
Original string // original line used before parsing
+ Flags []string // only top Node should have this set
}
var (
@@ -60,7 +61,6 @@ func init() {
command.Entrypoint: parseMaybeJSON,
command.Expose: parseStringsWhitespaceDelimited,
command.Volume: parseMaybeJSONToList,
- command.Insert: parseIgnore,
}
}
@@ -75,7 +75,7 @@ func parseLine(line string) (string, *Node, error) {
return line, nil, nil
}
- cmd, args, err := splitCommand(line)
+ cmd, flags, args, err := splitCommand(line)
if err != nil {
return "", nil, err
}
@@ -91,6 +91,7 @@ func parseLine(line string) (string, *Node, error) {
node.Next = sexp
node.Attributes = attrs
node.Original = line
+ node.Flags = flags
return "", node, nil
}
diff --git a/builder/parser/testfiles/flags/Dockerfile b/builder/parser/testfiles/flags/Dockerfile
new file mode 100644
index 0000000000..2418e0f069
--- /dev/null
+++ b/builder/parser/testfiles/flags/Dockerfile
@@ -0,0 +1,10 @@
+FROM scratch
+COPY foo /tmp/
+COPY --user=me foo /tmp/
+COPY --doit=true foo /tmp/
+COPY --user=me --doit=true foo /tmp/
+COPY --doit=true -- foo /tmp/
+COPY -- foo /tmp/
+CMD --doit [ "a", "b" ]
+CMD --doit=true -- [ "a", "b" ]
+CMD --doit -- [ ]
diff --git a/builder/parser/testfiles/flags/result b/builder/parser/testfiles/flags/result
new file mode 100644
index 0000000000..4578f4cba4
--- /dev/null
+++ b/builder/parser/testfiles/flags/result
@@ -0,0 +1,10 @@
+(from "scratch")
+(copy "foo" "/tmp/")
+(copy ["--user=me"] "foo" "/tmp/")
+(copy ["--doit=true"] "foo" "/tmp/")
+(copy ["--user=me" "--doit=true"] "foo" "/tmp/")
+(copy ["--doit=true"] "foo" "/tmp/")
+(copy "foo" "/tmp/")
+(cmd ["--doit"] "a" "b")
+(cmd ["--doit=true"] "a" "b")
+(cmd ["--doit"])
diff --git a/builder/parser/utils.go b/builder/parser/utils.go
index a60ad129fe..5d82e9604e 100644
--- a/builder/parser/utils.go
+++ b/builder/parser/utils.go
@@ -1,8 +1,10 @@
package parser
import (
+ "fmt"
"strconv"
"strings"
+ "unicode"
)
// dumps the AST defined by `node` as a list of sexps. Returns a string
@@ -11,6 +13,10 @@ func (node *Node) Dump() string {
str := ""
str += node.Value
+ if len(node.Flags) > 0 {
+ str += fmt.Sprintf(" %q", node.Flags)
+ }
+
for _, n := range node.Children {
str += "(" + n.Dump() + ")\n"
}
@@ -48,20 +54,23 @@ func fullDispatch(cmd, args string) (*Node, map[string]bool, error) {
// splitCommand takes a single line of text and parses out the cmd and args,
// which are used for dispatching to more exact parsing functions.
-func splitCommand(line string) (string, string, error) {
+func splitCommand(line string) (string, []string, string, error) {
var args string
+ var flags []string
// Make sure we get the same results irrespective of leading/trailing spaces
cmdline := TOKEN_WHITESPACE.Split(strings.TrimSpace(line), 2)
cmd := strings.ToLower(cmdline[0])
if len(cmdline) == 2 {
- args = strings.TrimSpace(cmdline[1])
+ var err error
+ args, flags, err = extractBuilderFlags(cmdline[1])
+ if err != nil {
+ return "", nil, "", err
+ }
}
- // the cmd should never have whitespace, but it's possible for the args to
- // have trailing whitespace.
- return cmd, args, nil
+ return cmd, flags, strings.TrimSpace(args), nil
}
// covers comments and empty lines. Lines should be trimmed before passing to
@@ -74,3 +83,94 @@ func stripComments(line string) string {
return line
}
+
+func extractBuilderFlags(line string) (string, []string, error) {
+ // Parses the BuilderFlags and returns the remaining part of the line
+
+ const (
+ inSpaces = iota // looking for start of a word
+ inWord
+ inQuote
+ )
+
+ words := []string{}
+ phase := inSpaces
+ word := ""
+ quote := '\000'
+ blankOK := false
+ var ch rune
+
+ for pos := 0; pos <= len(line); pos++ {
+ if pos != len(line) {
+ ch = rune(line[pos])
+ }
+
+ if phase == inSpaces { // Looking for start of word
+ if pos == len(line) { // end of input
+ break
+ }
+ if unicode.IsSpace(ch) { // skip spaces
+ continue
+ }
+
+ // Only keep going if the next word starts with --
+ if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
+ return line[pos:], words, nil
+ }
+
+ phase = inWord // found something with "--", fall thru
+ }
+ if (phase == inWord || phase == inQuote) && (pos == len(line)) {
+ if word != "--" && (blankOK || len(word) > 0) {
+ words = append(words, word)
+ }
+ break
+ }
+ if phase == inWord {
+ if unicode.IsSpace(ch) {
+ phase = inSpaces
+ if word == "--" {
+ return line[pos:], words, nil
+ }
+ if blankOK || len(word) > 0 {
+ words = append(words, word)
+ }
+ word = ""
+ blankOK = false
+ continue
+ }
+ if ch == '\'' || ch == '"' {
+ quote = ch
+ blankOK = true
+ phase = inQuote
+ continue
+ }
+ if ch == '\\' {
+ if pos+1 == len(line) {
+ continue // just skip \ at end
+ }
+ pos++
+ ch = rune(line[pos])
+ }
+ word += string(ch)
+ continue
+ }
+ if phase == inQuote {
+ if ch == quote {
+ phase = inWord
+ continue
+ }
+ if ch == '\\' {
+ if pos+1 == len(line) {
+ phase = inWord
+ continue // just skip \ at end
+ }
+ pos++
+ ch = rune(line[pos])
+ }
+ word += string(ch)
+ }
+ }
+
+ return "", words, nil
+}
diff --git a/contrib/builder/deb/debian-jessie/Dockerfile b/contrib/builder/deb/debian-jessie/Dockerfile
index ad90a21183..de888a1a75 100644
--- a/contrib/builder/deb/debian-jessie/Dockerfile
+++ b/contrib/builder/deb/debian-jessie/Dockerfile
@@ -7,7 +7,7 @@ FROM debian:jessie
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
ENV GO_VERSION 1.4.2
-RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
diff --git a/contrib/builder/deb/debian-stretch/Dockerfile b/contrib/builder/deb/debian-stretch/Dockerfile
new file mode 100644
index 0000000000..ee46282472
--- /dev/null
+++ b/contrib/builder/deb/debian-stretch/Dockerfile
@@ -0,0 +1,14 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
+#
+
+FROM debian:stretch
+
+RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS apparmor selinux
diff --git a/contrib/builder/deb/debian-wheezy/Dockerfile b/contrib/builder/deb/debian-wheezy/Dockerfile
index 87274d4096..dc9c388098 100644
--- a/contrib/builder/deb/debian-wheezy/Dockerfile
+++ b/contrib/builder/deb/debian-wheezy/Dockerfile
@@ -8,7 +8,7 @@ RUN echo deb http://http.debian.net/debian wheezy-backports main > /etc/apt/sour
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
ENV GO_VERSION 1.4.2
-RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
diff --git a/contrib/builder/deb/generate.sh b/contrib/builder/deb/generate.sh
index cd187c7ce8..49b26c45f4 100755
--- a/contrib/builder/deb/generate.sh
+++ b/contrib/builder/deb/generate.sh
@@ -59,7 +59,7 @@ for version in "${versions[@]}"; do
echo >> "$version/Dockerfile"
awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile"
- echo 'RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local' >> "$version/Dockerfile"
+ echo 'RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"
echo >> "$version/Dockerfile"
diff --git a/contrib/builder/deb/ubuntu-debootstrap-trusty/Dockerfile b/contrib/builder/deb/ubuntu-debootstrap-trusty/Dockerfile
index 5715b2698b..599a74f890 100644
--- a/contrib/builder/deb/ubuntu-debootstrap-trusty/Dockerfile
+++ b/contrib/builder/deb/ubuntu-debootstrap-trusty/Dockerfile
@@ -7,7 +7,7 @@ FROM ubuntu-debootstrap:trusty
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
ENV GO_VERSION 1.4.2
-RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
diff --git a/contrib/builder/deb/ubuntu-debootstrap-utopic/Dockerfile b/contrib/builder/deb/ubuntu-debootstrap-utopic/Dockerfile
index 3862b83707..81528ce567 100644
--- a/contrib/builder/deb/ubuntu-debootstrap-utopic/Dockerfile
+++ b/contrib/builder/deb/ubuntu-debootstrap-utopic/Dockerfile
@@ -7,7 +7,7 @@ FROM ubuntu-debootstrap:utopic
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
ENV GO_VERSION 1.4.2
-RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
diff --git a/contrib/builder/deb/ubuntu-debootstrap-vivid/Dockerfile b/contrib/builder/deb/ubuntu-debootstrap-vivid/Dockerfile
index 15911b268d..a8e238590a 100644
--- a/contrib/builder/deb/ubuntu-debootstrap-vivid/Dockerfile
+++ b/contrib/builder/deb/ubuntu-debootstrap-vivid/Dockerfile
@@ -7,7 +7,7 @@ FROM ubuntu-debootstrap:vivid
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
ENV GO_VERSION 1.4.2
-RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
ENV PATH $PATH:/usr/local/go/bin
ENV AUTO_GOPATH 1
diff --git a/contrib/builder/rpm/README.md b/contrib/builder/rpm/README.md
new file mode 100644
index 0000000000..153fbceb6a
--- /dev/null
+++ b/contrib/builder/rpm/README.md
@@ -0,0 +1,5 @@
+# `dockercore/builder-rpm`
+
+This image's tags contain the dependencies for building Docker `.rpm`s for each of the RPM-based platforms Docker targets.
+
+To add new tags, see [`contrib/builder/rpm` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/rpm), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file.
diff --git a/contrib/builder/rpm/build.sh b/contrib/builder/rpm/build.sh
new file mode 100755
index 0000000000..558f7ee0db
--- /dev/null
+++ b/contrib/builder/rpm/build.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -e
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+set -x
+./generate.sh
+for d in */; do
+ docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d"
+done
diff --git a/contrib/builder/rpm/centos-6/Dockerfile b/contrib/builder/rpm/centos-6/Dockerfile
new file mode 100644
index 0000000000..2daa715366
--- /dev/null
+++ b/contrib/builder/rpm/centos-6/Dockerfile
@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM centos:6
+
+RUN yum groupinstall -y "Development Tools"
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux exclude_graphdriver_btrfs
diff --git a/contrib/builder/rpm/centos-7/Dockerfile b/contrib/builder/rpm/centos-7/Dockerfile
new file mode 100644
index 0000000000..d7e4f2c0ac
--- /dev/null
+++ b/contrib/builder/rpm/centos-7/Dockerfile
@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM centos:7
+
+RUN yum groupinstall -y "Development Tools"
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux
diff --git a/contrib/builder/rpm/fedora-20/Dockerfile b/contrib/builder/rpm/fedora-20/Dockerfile
new file mode 100644
index 0000000000..f0c701bcad
--- /dev/null
+++ b/contrib/builder/rpm/fedora-20/Dockerfile
@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM fedora:20
+
+RUN yum install -y @development-tools fedora-packager
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux
diff --git a/contrib/builder/rpm/fedora-21/Dockerfile b/contrib/builder/rpm/fedora-21/Dockerfile
new file mode 100644
index 0000000000..3d84706ace
--- /dev/null
+++ b/contrib/builder/rpm/fedora-21/Dockerfile
@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM fedora:21
+
+RUN yum install -y @development-tools fedora-packager
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux
diff --git a/contrib/builder/rpm/generate.sh b/contrib/builder/rpm/generate.sh
new file mode 100755
index 0000000000..b34193c664
--- /dev/null
+++ b/contrib/builder/rpm/generate.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+set -e
+
+# usage: ./generate.sh [versions]
+# ie: ./generate.sh
+# to update all Dockerfiles in this directory
+# or: ./generate.sh fedora-20
+# to only update fedora-20/Dockerfile
+# or: ./generate.sh fedora-newversion
+# to create a new folder and a Dockerfile within it
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+versions=( "$@" )
+if [ ${#versions[@]} -eq 0 ]; then
+ versions=( */ )
+fi
+versions=( "${versions[@]%/}" )
+
+for version in "${versions[@]}"; do
+ distro="${version%-*}"
+ suite="${version##*-}"
+ from="${distro}:${suite}"
+
+ mkdir -p "$version"
+ echo "$version -> FROM $from"
+ cat > "$version/Dockerfile" <<-EOF
+ #
+ # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+ #
+
+ FROM $from
+ EOF
+
+ echo >> "$version/Dockerfile"
+
+ case "$from" in
+ centos:*)
+ # get "Development Tools" packages dependencies
+ echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile"
+ ;;
+ *)
+ echo 'RUN yum install -y @development-tools fedora-packager' >> "$version/Dockerfile"
+ ;;
+ esac
+
+ # this list is sorted alphabetically; please keep it that way
+ packages=(
+ btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible)
+ device-mapper-devel # for "libdevmapper.h"
+ glibc-static
+ libselinux-devel # for "libselinux.so"
+ sqlite-devel # for "sqlite3.h"
+ tar # older versions of dev-tools don't have tar
+ )
+ echo "RUN yum install -y ${packages[*]}" >> "$version/Dockerfile"
+
+ echo >> "$version/Dockerfile"
+
+ awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile"
+ echo 'RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
+ echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"
+
+ echo >> "$version/Dockerfile"
+
+ echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile"
+
+ if [ "$from" == "centos:6" ]; then
+ echo 'ENV DOCKER_BUILDTAGS selinux exclude_graphdriver_btrfs' >> "$version/Dockerfile"
+ else
+ echo 'ENV DOCKER_BUILDTAGS selinux' >> "$version/Dockerfile"
+ fi
+done
diff --git a/contrib/check-config.sh b/contrib/check-config.sh
index 8c55de590e..1482777f1a 100755
--- a/contrib/check-config.sh
+++ b/contrib/check-config.sh
@@ -26,6 +26,12 @@ fi
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
+is_set_in_kernel() {
+ zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
+}
+is_set_as_module() {
+ zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
+}
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
declare -A colors=(
@@ -70,8 +76,10 @@ wrap_warning() {
}
check_flag() {
- if is_set "$1"; then
+ if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
+ elif is_set_as_module "$1"; then
+ wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
fi
@@ -83,6 +91,22 @@ check_flags() {
done
}
+check_command() {
+ if command -v "$1" >/dev/null 2>&1; then
+ wrap_good "$1 command" 'available'
+ else
+ wrap_bad "$1 command" 'missing'
+ fi
+}
+
+check_device() {
+ if [ -c "$1" ]; then
+ wrap_good "$1" 'present'
+ else
+ wrap_bad "$1" 'missing'
+ fi
+}
+
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..."
for tryConfig in "${possibleConfigs[@]}"; do
@@ -139,7 +163,7 @@ flags=(
NAMESPACES {NET,PID,IPC,UTS}_NS
DEVPTS_MULTIPLE_INSTANCES
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS
- MACVLAN VETH BRIDGE
+ MACVLAN VETH BRIDGE BRIDGE_NETFILTER
NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE
NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}
NF_NAT NF_NAT_NEEDED
@@ -160,6 +184,8 @@ echo 'Optional Features:'
}
flags=(
RESOURCE_COUNTERS
+ BLK_CGROUP
+ IOSCHED_CFQ
CGROUP_PERF
CFS_BANDWIDTH
)
@@ -182,6 +208,11 @@ echo '- Storage Drivers:'
echo '- "'$(wrap_color 'overlay' blue)'":'
check_flags OVERLAY_FS EXT4_FS_SECURITY EXT4_FS_POSIX_ACL | sed 's/^/ /'
+
+ echo '- "'$(wrap_color 'zfs' blue)'":'
+ echo " - $(check_device /dev/zfs)"
+ echo " - $(check_command zfs)"
+ echo " - $(check_command zpool)"
} | sed 's/^/ /'
echo
diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker
index 5b7a102a68..0f4e2f1a75 100755
--- a/contrib/completion/bash/docker
+++ b/contrib/completion/bash/docker
@@ -27,7 +27,7 @@
# This order should be applied to lists, alternatives and code blocks.
__docker_q() {
- docker 2>/dev/null "$@"
+ docker ${host:+-H "$host"} 2>/dev/null "$@"
}
__docker_containers_all() {
@@ -407,7 +407,7 @@ _docker_events() {
_docker_exec() {
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--detach -d --help --interactive -i --privileged -t --tty -u --user" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--detach -d --help --interactive -i -t --tty -u --user" -- "$cur" ) )
;;
*)
__docker_containers_running
@@ -593,7 +593,7 @@ _docker_logs() {
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--follow -f --help --tail --timestamps -t" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--follow -f --help --since --tail --timestamps -t" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag '--tail')
@@ -679,6 +679,14 @@ _docker_pull() {
*)
local counter=$(__docker_pos_first_nonflag)
if [ $cword -eq $counter ]; then
+ for arg in "${COMP_WORDS[@]}"; do
+ case "$arg" in
+ --all-tags|-a)
+ __docker_image_repos
+ return
+ ;;
+ esac
+ done
__docker_image_repos_and_tags
fi
;;
@@ -770,6 +778,7 @@ _docker_run() {
--cidfile
--cpuset
--cpu-shares -c
+ --cpu-period
--cpu-quota
--device
--dns
@@ -1003,7 +1012,7 @@ _docker_start() {
_docker_stats() {
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--no-stream --help" -- "$cur" ) )
;;
*)
__docker_containers_running
@@ -1152,6 +1161,7 @@ _docker() {
--dns-search
--exec-driver -e
--exec-opt
+ --exec-root
--fixed-cidr
--fixed-cidr-v6
--graph -g
@@ -1173,6 +1183,7 @@ _docker() {
"
local main_options_with_args_glob=$(__docker_to_extglob "$main_options_with_args")
+ local host
COMPREPLY=()
local cur prev words cword
@@ -1182,6 +1193,11 @@ _docker() {
local counter=1
while [ $counter -lt $cword ]; do
case "${words[$counter]}" in
+ # save host so that completion can use custom daemon
+ --host|-H)
+ (( counter++ ))
+ host="${words[$counter]}"
+ ;;
$main_options_with_args_glob )
(( counter++ ))
;;
diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish
index c535911853..79f7ed4bad 100644
--- a/contrib/completion/fish/docker.fish
+++ b/contrib/completion/fish/docker.fish
@@ -16,7 +16,7 @@
function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand'
for i in (commandline -opc)
- if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait
+ if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait stats
return 1
end
end
@@ -233,6 +233,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the log
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output'
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage'
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps'
+complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l since -d 'Show logs since timestamp'
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)'
complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container"
@@ -362,6 +363,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_prin
# stats
complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics"
complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l no-stream -d 'Disable streaming stats and only pull the first result'
complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container"
# stop
diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker
index 28398f7524..abd666313e 100644
--- a/contrib/completion/zsh/_docker
+++ b/contrib/completion/zsh/_docker
@@ -305,6 +305,7 @@ __docker_subcommand () {
(logs)
_arguments \
{-f,--follow}'[Follow log output]' \
+ '-s,--since[Show logs since timestamp]' \
{-t,--timestamps}'[Show timestamps]' \
'--tail=-[Output the last K lines]:lines:(1 10 20 50 all)' \
'*:containers:__docker_containers'
@@ -326,6 +327,7 @@ __docker_subcommand () {
;;
(stats)
_arguments \
+ '--no-stream[Disable streaming stats and only pull the first result]' \
'*:containers:__docker_runningcontainers'
;;
(rm)
diff --git a/contrib/download-frozen-image.sh b/contrib/download-frozen-image.sh
index 8a2bb50122..29d7ff59fd 100755
--- a/contrib/download-frozen-image.sh
+++ b/contrib/download-frozen-image.sh
@@ -42,6 +42,8 @@ while [ $# -gt 0 ]; do
[ "$tag" != "$imageTag" ] || tag='latest'
tag="${tag%@*}"
+ imageFile="${image//\//_}" # "/" can't be in filenames :)
+
token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')"
if [ -z "$imageId" ]; then
@@ -60,12 +62,12 @@ while [ $# -gt 0 ]; do
ancestry=( ${ancestryJson//[\[\] \"]/} )
unset IFS
- if [ -s "$dir/tags-$image.tmp" ]; then
- echo -n ', ' >> "$dir/tags-$image.tmp"
+ if [ -s "$dir/tags-$imageFile.tmp" ]; then
+ echo -n ', ' >> "$dir/tags-$imageFile.tmp"
else
images=( "${images[@]}" "$image" )
fi
- echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$image.tmp"
+ echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp"
echo "Downloading '$imageTag' (${#ancestry[@]} layers)..."
for imageId in "${ancestry[@]}"; do
@@ -90,10 +92,12 @@ done
echo -n '{' > "$dir/repositories"
firstImage=1
for image in "${images[@]}"; do
+ imageFile="${image//\//_}" # "/" can't be in filenames :)
+
[ "$firstImage" ] || echo -n ',' >> "$dir/repositories"
firstImage=
echo -n $'\n\t' >> "$dir/repositories"
- echo -n '"'"$image"'": { '"$(cat "$dir/tags-$image.tmp")"' }' >> "$dir/repositories"
+ echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories"
done
echo -n $'\n}\n' >> "$dir/repositories"
diff --git a/contrib/init/upstart/docker.conf b/contrib/init/upstart/docker.conf
index 5e8df6e3c2..52721312dd 100644
--- a/contrib/init/upstart/docker.conf
+++ b/contrib/init/upstart/docker.conf
@@ -7,6 +7,8 @@ limit nproc 524288 1048576
respawn
+kill timeout 20
+
pre-start script
# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
if grep -v '^#' /etc/fstab | grep -q cgroup \
diff --git a/contrib/mkimage-unittest.sh b/contrib/mkimage-unittest.sh
deleted file mode 100755
index feebb17b0e..0000000000
--- a/contrib/mkimage-unittest.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env bash
-# Generate a very minimal filesystem based on busybox-static,
-# and load it into the local docker under the name "docker-ut".
-
-missing_pkg() {
- echo "Sorry, I could not locate $1"
- echo "Try 'apt-get install ${2:-$1}'?"
- exit 1
-}
-
-BUSYBOX=$(which busybox)
-[ "$BUSYBOX" ] || missing_pkg busybox busybox-static
-SOCAT=$(which socat)
-[ "$SOCAT" ] || missing_pkg socat
-
-shopt -s extglob
-set -ex
-ROOTFS=`mktemp -d ${TMPDIR:-/var/tmp}/rootfs-busybox.XXXXXXXXXX`
-trap "rm -rf $ROOTFS" INT QUIT TERM
-cd $ROOTFS
-
-mkdir bin etc dev dev/pts lib proc sys tmp
-touch etc/resolv.conf
-cp /etc/nsswitch.conf etc/nsswitch.conf
-echo root:x:0:0:root:/:/bin/sh > etc/passwd
-echo daemon:x:1:1:daemon:/usr/sbin:/bin/sh >> etc/passwd
-echo root:x:0: > etc/group
-echo daemon:x:1: >> etc/group
-ln -s lib lib64
-ln -s bin sbin
-cp $BUSYBOX $SOCAT bin
-for X in $(busybox --list)
-do
- ln -s busybox bin/$X
-done
-rm bin/init
-ln bin/busybox bin/init
-cp -P /lib/x86_64-linux-gnu/lib{pthread*,c*(-*),dl*(-*),nsl*(-*),nss_*,util*(-*),wrap,z}.so* lib
-cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib
-cp -P /usr/lib/x86_64-linux-gnu/lib{crypto,ssl}.so* lib
-for X in console null ptmx random stdin stdout stderr tty urandom zero
-do
- cp -a /dev/$X dev
-done
-
-chmod 0755 $ROOTFS # See #486
-tar --numeric-owner -cf- . | docker import - docker-ut
-docker run -i -u root docker-ut /bin/echo Success.
-rm -rf $ROOTFS
diff --git a/contrib/mkimage/debootstrap b/contrib/mkimage/debootstrap
index adcb59add7..c613d5375d 100755
--- a/contrib/mkimage/debootstrap
+++ b/contrib/mkimage/debootstrap
@@ -176,11 +176,19 @@ if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then
s/ $suite / ${suite}-updates /
" "$rootfsDir/etc/apt/sources.list"
echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list"
- # LTS
- if [ "$suite" = 'squeeze' -o "$suite" = 'oldstable' ]; then
- head -1 "$rootfsDir/etc/apt/sources.list" \
- | sed "s/ $suite / squeeze-lts /" \
- >> "$rootfsDir/etc/apt/sources.list"
+ # squeeze-lts
+ if [ -f "$rootfsDir/etc/debian_version" ]; then
+ ltsSuite=
+ case "$(cat "$rootfsDir/etc/debian_version")" in
+ 6.*) ltsSuite='squeeze-lts' ;;
+ #7.*) ltsSuite='wheezy-lts' ;;
+ #8.*) ltsSuite='jessie-lts' ;;
+ esac
+ if [ "$ltsSuite" ]; then
+ head -1 "$rootfsDir/etc/apt/sources.list" \
+ | sed "s/ $suite / $ltsSuite /" \
+ >> "$rootfsDir/etc/apt/sources.list"
+ fi
fi
)
fi
diff --git a/contrib/syntax/nano/Dockerfile.nanorc b/contrib/syntax/nano/Dockerfile.nanorc
new file mode 100644
index 0000000000..80e56dfb36
--- /dev/null
+++ b/contrib/syntax/nano/Dockerfile.nanorc
@@ -0,0 +1,26 @@
+## Syntax highlighting for Dockerfiles
+syntax "Dockerfile" "Dockerfile[^/]*$"
+
+## Keywords
+icolor red "^(FROM|MAINTAINER|RUN|CMD|LABEL|EXPOSE|ENV|ADD|COPY|ENTRYPOINT|VOLUME|USER|WORKDIR|ONBUILD)[[:space:]]"
+
+## Brackets & parenthesis
+color brightgreen "(\(|\)|\[|\])"
+
+## Double ampersand
+color brightmagenta "&&"
+
+## Comments
+icolor cyan "^[[:space:]]*#.*$"
+
+## Blank space at EOL
+color ,green "[[:space:]]+$"
+
+## Strings, single-quoted
+color brightwhite "'([^']|(\\'))*'" "%[qw]\{[^}]*\}" "%[qw]\([^)]*\)" "%[qw]<[^>]*>" "%[qw]\[[^]]*\]" "%[qw]\$[^$]*\$" "%[qw]\^[^^]*\^" "%[qw]![^!]*!"
+
+## Strings, double-quoted
+color brightwhite ""([^"]|(\\"))*"" "%[QW]?\{[^}]*\}" "%[QW]?\([^)]*\)" "%[QW]?<[^>]*>" "%[QW]?\[[^]]*\]" "%[QW]?\$[^$]*\$" "%[QW]?\^[^^]*\^" "%[QW]?![^!]*!"
+
+## Single and double quotes
+color brightyellow "('|\")"
diff --git a/contrib/syntax/nano/README.md b/contrib/syntax/nano/README.md
new file mode 100644
index 0000000000..5985208b09
--- /dev/null
+++ b/contrib/syntax/nano/README.md
@@ -0,0 +1,32 @@
+Dockerfile.nanorc
+=================
+
+Dockerfile syntax highlighting for nano
+
+Single User Installation
+------------------------
+1. Create a nano syntax directory in your home directory:
+ * `mkdir -p ~/.nano/syntax`
+
+2. Copy `Dockerfile.nanorc` to `~/.nano/syntax/`
+ * `cp Dockerfile.nanorc ~/.nano/syntax/`
+
+3. Add the following to your `~/.nanorc` to tell nano where to find the `Dockerfile.nanorc` file
+ ```
+## Dockerfile files
+include "~/.nano/syntax/Dockerfile.nanorc"
+ ```
+
+System Wide Installation
+------------------------
+1. Create a nano syntax directory:
+ * `mkdir /usr/local/share/nano`
+
+2. Copy `Dockerfile.nanorc` to `/usr/local/share/nano`
+ * `cp Dockerfile.nanorc /usr/local/share/nano/`
+
+3. Add the following to your `/etc/nanorc`:
+ ```
+## Dockerfile files
+include "/usr/local/share/nano/Dockerfile.nanorc"
+ ```
diff --git a/daemon/attach.go b/daemon/attach.go
index b2b8d09067..5193cf101d 100644
--- a/daemon/attach.go
+++ b/daemon/attach.go
@@ -1,229 +1,61 @@
package daemon
import (
- "encoding/json"
"io"
- "os"
- "sync"
- "time"
- "github.com/Sirupsen/logrus"
- "github.com/docker/docker/pkg/jsonlog"
- "github.com/docker/docker/pkg/promise"
+ "github.com/docker/docker/pkg/stdcopy"
)
-func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
- if logs {
- cLog, err := c.ReadLog("json")
- if err != nil && os.IsNotExist(err) {
- // Legacy logs
- logrus.Debugf("Old logs format")
- if stdout != nil {
- cLog, err := c.ReadLog("stdout")
- if err != nil {
- logrus.Errorf("Error reading logs (stdout): %s", err)
- } else if _, err := io.Copy(stdout, cLog); err != nil {
- logrus.Errorf("Error streaming logs (stdout): %s", err)
- }
- }
- if stderr != nil {
- cLog, err := c.ReadLog("stderr")
- if err != nil {
- logrus.Errorf("Error reading logs (stderr): %s", err)
- } else if _, err := io.Copy(stderr, cLog); err != nil {
- logrus.Errorf("Error streaming logs (stderr): %s", err)
- }
- }
- } else if err != nil {
- logrus.Errorf("Error reading logs (json): %s", err)
- } else {
- dec := json.NewDecoder(cLog)
- for {
- l := &jsonlog.JSONLog{}
-
- if err := dec.Decode(l); err == io.EOF {
- break
- } else if err != nil {
- logrus.Errorf("Error streaming logs: %s", err)
- break
- }
- if l.Stream == "stdout" && stdout != nil {
- io.WriteString(stdout, l.Log)
- }
- if l.Stream == "stderr" && stderr != nil {
- io.WriteString(stderr, l.Log)
- }
- }
- }
- }
-
- //stream
- if stream {
- var stdinPipe io.ReadCloser
- if stdin != nil {
- r, w := io.Pipe()
- go func() {
- defer w.Close()
- defer logrus.Debugf("Closing buffered stdin pipe")
- io.Copy(w, stdin)
- }()
- stdinPipe = r
- }
- <-c.Attach(stdinPipe, stdout, stderr)
- // If we are in stdinonce mode, wait for the process to end
- // otherwise, simply return
- if c.Config.StdinOnce && !c.Config.Tty {
- c.WaitStop(-1 * time.Second)
- }
- }
- return nil
+type ContainerAttachWithLogsConfig struct {
+ InStream io.ReadCloser
+ OutStream io.Writer
+ UseStdin, UseStdout, UseStderr bool
+ Logs, Stream bool
+ Multiplex bool
}
-func (c *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
- return attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, stdin, stdout, stderr)
+func (daemon *Daemon) ContainerAttachWithLogs(name string, c *ContainerAttachWithLogsConfig) error {
+ container, err := daemon.Get(name)
+ if err != nil {
+ return err
+ }
+
+ var errStream io.Writer
+
+ if !container.Config.Tty && c.Multiplex {
+ errStream = stdcopy.NewStdWriter(c.OutStream, stdcopy.Stderr)
+ c.OutStream = stdcopy.NewStdWriter(c.OutStream, stdcopy.Stdout)
+ } else {
+ errStream = c.OutStream
+ }
+
+ var stdin io.ReadCloser
+ var stdout, stderr io.Writer
+
+ if c.UseStdin {
+ stdin = c.InStream
+ }
+ if c.UseStdout {
+ stdout = c.OutStream
+ }
+ if c.UseStderr {
+ stderr = errStream
+ }
+
+ return container.AttachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream)
}
-func attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
- var (
- cStdout, cStderr io.ReadCloser
- cStdin io.WriteCloser
- wg sync.WaitGroup
- errors = make(chan error, 3)
- )
-
- if stdin != nil && openStdin {
- cStdin = streamConfig.StdinPipe()
- wg.Add(1)
- }
-
- if stdout != nil {
- cStdout = streamConfig.StdoutPipe()
- wg.Add(1)
- }
-
- if stderr != nil {
- cStderr = streamConfig.StderrPipe()
- wg.Add(1)
- }
-
- // Connect stdin of container to the http conn.
- go func() {
- if stdin == nil || !openStdin {
- return
- }
- logrus.Debugf("attach: stdin: begin")
- defer func() {
- if stdinOnce && !tty {
- cStdin.Close()
- } else {
- // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
- if cStdout != nil {
- cStdout.Close()
- }
- if cStderr != nil {
- cStderr.Close()
- }
- }
- wg.Done()
- logrus.Debugf("attach: stdin: end")
- }()
-
- var err error
- if tty {
- _, err = copyEscapable(cStdin, stdin)
- } else {
- _, err = io.Copy(cStdin, stdin)
-
- }
- if err == io.ErrClosedPipe {
- err = nil
- }
- if err != nil {
- logrus.Errorf("attach: stdin: %s", err)
- errors <- err
- return
- }
- }()
-
- attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) {
- if stream == nil {
- return
- }
- defer func() {
- // Make sure stdin gets closed
- if stdin != nil {
- stdin.Close()
- }
- streamPipe.Close()
- wg.Done()
- logrus.Debugf("attach: %s: end", name)
- }()
-
- logrus.Debugf("attach: %s: begin", name)
- _, err := io.Copy(stream, streamPipe)
- if err == io.ErrClosedPipe {
- err = nil
- }
- if err != nil {
- logrus.Errorf("attach: %s: %v", name, err)
- errors <- err
- }
- }
-
- go attachStream("stdout", stdout, cStdout)
- go attachStream("stderr", stderr, cStderr)
-
- return promise.Go(func() error {
- wg.Wait()
- close(errors)
- for err := range errors {
- if err != nil {
- return err
- }
- }
- return nil
- })
+type ContainerWsAttachWithLogsConfig struct {
+ InStream io.ReadCloser
+ OutStream, ErrStream io.Writer
+ Logs, Stream bool
}
-// Code c/c from io.Copy() modified to handle escape sequence
-func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
- buf := make([]byte, 32*1024)
- for {
- nr, er := src.Read(buf)
- if nr > 0 {
- // ---- Docker addition
- // char 16 is C-p
- if nr == 1 && buf[0] == 16 {
- nr, er = src.Read(buf)
- // char 17 is C-q
- if nr == 1 && buf[0] == 17 {
- if err := src.Close(); err != nil {
- return 0, err
- }
- return 0, nil
- }
- }
- // ---- End of docker
- nw, ew := dst.Write(buf[0:nr])
- if nw > 0 {
- written += int64(nw)
- }
- if ew != nil {
- err = ew
- break
- }
- if nr != nw {
- err = io.ErrShortWrite
- break
- }
- }
- if er == io.EOF {
- break
- }
- if er != nil {
- err = er
- break
- }
+func (daemon *Daemon) ContainerWsAttachWithLogs(name string, c *ContainerWsAttachWithLogsConfig) error {
+ container, err := daemon.Get(name)
+ if err != nil {
+ return err
}
- return written, err
+
+ return container.AttachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
}
diff --git a/daemon/changes.go b/daemon/changes.go
new file mode 100644
index 0000000000..55b230b9b4
--- /dev/null
+++ b/daemon/changes.go
@@ -0,0 +1,13 @@
+package daemon
+
+import "github.com/docker/docker/pkg/archive"
+
+// ContainerChanges returns a list of container fs changes
+func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
+ container, err := daemon.Get(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return container.Changes()
+}
diff --git a/daemon/commit.go b/daemon/commit.go
index 0c49eb2c95..28be6828b3 100644
--- a/daemon/commit.go
+++ b/daemon/commit.go
@@ -32,7 +32,11 @@ func (daemon *Daemon) Commit(container *Container, repository, tag, comment, aut
if err != nil {
return nil, err
}
- defer rwTar.Close()
+ defer func() {
+ if rwTar != nil {
+ rwTar.Close()
+ }
+ }()
// Create a new image from the container's base layers + a new layer from container changes
var (
diff --git a/daemon/config.go b/daemon/config.go
index 43b08531b5..3599def874 100644
--- a/daemon/config.go
+++ b/daemon/config.go
@@ -1,11 +1,10 @@
package daemon
import (
- "github.com/docker/docker/daemon/networkdriver"
- "github.com/docker/docker/daemon/networkdriver/bridge"
+ "net"
+
"github.com/docker/docker/opts"
flag "github.com/docker/docker/pkg/mflag"
- "github.com/docker/docker/pkg/ulimit"
"github.com/docker/docker/runconfig"
)
@@ -14,46 +13,60 @@ const (
disableNetworkBridge = "none"
)
-// Config define the configuration of a docker daemon
-// These are the configuration settings that you pass
-// to the docker daemon when you launch it with say: `docker -d -e lxc`
-// FIXME: separate runtime configuration from http api configuration
-type Config struct {
- Bridge bridge.Config
-
- Pidfile string
- Root string
- AutoRestart bool
- Dns []string
- DnsSearch []string
- GraphDriver string
- GraphOptions []string
- ExecDriver string
- ExecOptions []string
- Mtu int
- SocketGroup string
- EnableCors bool
- CorsHeaders string
- DisableNetwork bool
- EnableSelinuxSupport bool
- Context map[string][]string
- TrustKeyPath string
- Labels []string
- Ulimits map[string]*ulimit.Ulimit
- LogConfig runconfig.LogConfig
+// CommonConfig defines the configuration of a docker daemon which are
+// common across platforms.
+type CommonConfig struct {
+ AutoRestart bool
+ // Bridge holds bridge network specific configuration.
+ Bridge bridgeConfig
+ Context map[string][]string
+ CorsHeaders string
+ DisableNetwork bool
+ Dns []string
+ DnsSearch []string
+ EnableCors bool
+ ExecDriver string
+ ExecRoot string
+ GraphDriver string
+ Labels []string
+ LogConfig runconfig.LogConfig
+ Mtu int
+ Pidfile string
+ Root string
+ TrustKeyPath string
}
-// InstallFlags adds command-line options to the top-level flag parser for
+// bridgeConfig stores all the bridge driver specific
+// configuration.
+type bridgeConfig struct {
+ EnableIPv6 bool
+ EnableIPTables bool
+ EnableIPForward bool
+ EnableIPMasq bool
+ EnableUserlandProxy bool
+ DefaultIP net.IP
+ Iface string
+ IP string
+ FixedCIDR string
+ FixedCIDRv6 string
+ DefaultGatewayIPv4 string
+ DefaultGatewayIPv6 string
+ InterContainerCommunication bool
+}
+
+// InstallCommonFlags adds command-line options to the top-level flag parser for
// the current process.
// Subsequent calls to `flag.Parse` will populate config with values parsed
// from the command-line.
-func (config *Config) InstallFlags() {
- flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
- flag.StringVar(&config.Root, []string{"g", "-graph"}, "/var/lib/docker", "Root of the Docker runtime")
+
+func (config *Config) InstallCommonFlags() {
+ flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, defaultPidFile, "Path to use for daemon PID file")
+ flag.StringVar(&config.Root, []string{"g", "-graph"}, defaultGraph, "Root of the Docker runtime")
+ flag.StringVar(&config.ExecRoot, []string{"-exec-root"}, "/var/run/docker", "Root of the Docker execdriver")
flag.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run")
- flag.BoolVar(&config.Bridge.EnableIptables, []string{"#iptables", "-iptables"}, true, "Enable addition of iptables rules")
- flag.BoolVar(&config.Bridge.EnableIpForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
- flag.BoolVar(&config.Bridge.EnableIpMasq, []string{"-ip-masq"}, true, "Enable IP masquerading")
+ flag.BoolVar(&config.Bridge.EnableIPTables, []string{"#iptables", "-iptables"}, true, "Enable addition of iptables rules")
+ flag.BoolVar(&config.Bridge.EnableIPForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
+ flag.BoolVar(&config.Bridge.EnableIPMasq, []string{"-ip-masq"}, true, "Enable IP masquerading")
flag.BoolVar(&config.Bridge.EnableIPv6, []string{"-ipv6"}, false, "Enable IPv6 networking")
flag.StringVar(&config.Bridge.IP, []string{"#bip", "-bip"}, "", "Specify network bridge IP")
flag.StringVar(&config.Bridge.Iface, []string{"b", "-bridge"}, "", "Attach containers to a network bridge")
@@ -64,26 +77,16 @@ func (config *Config) InstallFlags() {
flag.BoolVar(&config.Bridge.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication")
flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Storage driver to use")
flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Exec driver to use")
- flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support")
flag.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, "Set the containers network MTU")
- flag.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", "Group for the unix socket")
flag.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, "Enable CORS headers in the remote API, this is deprecated by --api-cors-header")
flag.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", "Set CORS headers in the remote API")
- opts.IPVar(&config.Bridge.DefaultIp, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP when binding container ports")
- opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options")
- opts.ListVar(&config.ExecOptions, []string{"-exec-opt"}, "Set exec driver options")
+ opts.IPVar(&config.Bridge.DefaultIP, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP when binding container ports")
// FIXME: why the inconsistency between "hosts" and "sockets"?
opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "DNS server to use")
opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "DNS search domains to use")
opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon")
- config.Ulimits = make(map[string]*ulimit.Ulimit)
- opts.UlimitMapVar(config.Ulimits, []string{"-default-ulimit"}, "Set default ulimits for containers")
flag.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", "Default driver for container logs")
-}
+ opts.LogOptsVar(config.LogConfig.Config, []string{"-log-opt"}, "Set log driver options")
+ flag.BoolVar(&config.Bridge.EnableUserlandProxy, []string{"-userland-proxy"}, true, "Use userland proxy for loopback traffic")
-func getDefaultNetworkMtu() int {
- if iface, err := networkdriver.GetDefaultRouteIface(); err == nil {
- return iface.MTU
- }
- return defaultNetworkMtu
}
diff --git a/daemon/config_linux.go b/daemon/config_linux.go
new file mode 100644
index 0000000000..8f2d6f1d2f
--- /dev/null
+++ b/daemon/config_linux.go
@@ -0,0 +1,43 @@
+package daemon
+
+import (
+ "github.com/docker/docker/opts"
+ flag "github.com/docker/docker/pkg/mflag"
+ "github.com/docker/docker/pkg/ulimit"
+)
+
+var (
+ defaultPidFile = "/var/run/docker.pid"
+ defaultGraph = "/var/lib/docker"
+)
+
+// Config defines the configuration of a docker daemon.
+// These are the configuration settings that you pass
+// to the docker daemon when you launch it with say: `docker -d -e lxc`
+type Config struct {
+ CommonConfig
+
+ // Fields below here are platform specific.
+ EnableSelinuxSupport bool
+ ExecOptions []string
+ GraphOptions []string
+ SocketGroup string
+ Ulimits map[string]*ulimit.Ulimit
+}
+
+// InstallFlags adds command-line options to the top-level flag parser for
+// the current process.
+// Subsequent calls to `flag.Parse` will populate config with values parsed
+// from the command-line.
+func (config *Config) InstallFlags() {
+ // First handle install flags which are consistent cross-platform
+ config.InstallCommonFlags()
+
+ // Then platform-specific install flags
+ opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options")
+ opts.ListVar(&config.ExecOptions, []string{"-exec-opt"}, "Set exec driver options")
+ flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support")
+ flag.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", "Group for the unix socket")
+ config.Ulimits = make(map[string]*ulimit.Ulimit)
+ opts.UlimitMapVar(config.Ulimits, []string{"-default-ulimit"}, "Set default ulimits for containers")
+}
diff --git a/daemon/config_windows.go b/daemon/config_windows.go
new file mode 100644
index 0000000000..30df21eb20
--- /dev/null
+++ b/daemon/config_windows.go
@@ -0,0 +1,32 @@
+package daemon
+
+import (
+ "os"
+)
+
+var (
+ defaultPidFile = os.Getenv("programdata") + string(os.PathSeparator) + "docker.pid"
+ defaultGraph = os.Getenv("programdata") + string(os.PathSeparator) + "docker"
+)
+
+// Config defines the configuration of a docker daemon.
+// These are the configuration settings that you pass
+// to the docker daemon when you launch it with say: `docker -d -e windows`
+type Config struct {
+ CommonConfig
+
+ // Fields below here are platform specific. (There are none presently
+ // for the Windows daemon.)
+}
+
+// InstallFlags adds command-line options to the top-level flag parser for
+// the current process.
+// Subsequent calls to `flag.Parse` will populate config with values parsed
+// from the command-line.
+func (config *Config) InstallFlags() {
+ // First handle install flags which are consistent cross-platform
+ config.InstallCommonFlags()
+
+ // Then platform-specific install flags. There are none presently on Windows
+
+}
diff --git a/daemon/container.go b/daemon/container.go
index 0ec409e12c..4a56132244 100644
--- a/daemon/container.go
+++ b/daemon/container.go
@@ -1,51 +1,38 @@
package daemon
import (
- "bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
- "path"
"path/filepath"
"strings"
+ "sync"
"syscall"
"time"
- "github.com/docker/libcontainer/configs"
- "github.com/docker/libcontainer/devices"
"github.com/docker/libcontainer/label"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/daemon/logger"
- "github.com/docker/docker/daemon/logger/journald"
"github.com/docker/docker/daemon/logger/jsonfilelog"
- "github.com/docker/docker/daemon/logger/syslog"
"github.com/docker/docker/daemon/network"
- "github.com/docker/docker/daemon/networkdriver/bridge"
- "github.com/docker/docker/engine"
"github.com/docker/docker/image"
- "github.com/docker/docker/links"
"github.com/docker/docker/nat"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/broadcastwriter"
- "github.com/docker/docker/pkg/directory"
- "github.com/docker/docker/pkg/etchosts"
"github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/jsonlog"
+ "github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/promise"
- "github.com/docker/docker/pkg/resolvconf"
- "github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/symlink"
- "github.com/docker/docker/pkg/ulimit"
"github.com/docker/docker/runconfig"
- "github.com/docker/docker/utils"
+ "github.com/docker/docker/volume"
)
-const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-
var (
ErrNotATTY = errors.New("The PTY is not a file")
ErrNoTTY = errors.New("No PTY found")
@@ -60,56 +47,43 @@ type StreamConfig struct {
stdinPipe io.WriteCloser
}
-type Container struct {
+// CommonContainer holds the settings for a container which are applicable
+// across all platforms supported by the daemon.
+type CommonContainer struct {
+ StreamConfig
+
*State `json:"State"` // Needed for remote api version <= 1.11
root string // Path to the "home" of the container, including metadata.
basefs string // Path to the graphdriver mountpoint
- ID string
-
- Created time.Time
-
- Path string
- Args []string
-
- Config *runconfig.Config
- ImageID string `json:"Image"`
-
- NetworkSettings *network.Settings
-
- ResolvConfPath string
- HostnamePath string
- HostsPath string
- LogPath string
- Name string
- Driver string
- ExecDriver string
-
- command *execdriver.Command
- StreamConfig
-
- daemon *Daemon
+ ID string
+ Created time.Time
+ Path string
+ Args []string
+ Config *runconfig.Config
+ ImageID string `json:"Image"`
+ NetworkSettings *network.Settings
+ ResolvConfPath string
+ HostnamePath string
+ HostsPath string
+ LogPath string
+ Name string
+ Driver string
+ ExecDriver string
MountLabel, ProcessLabel string
- AppArmorProfile string
RestartCount int
UpdateDns bool
+ MountPoints map[string]*mountPoint
- // Maps container paths to volume paths. The key in this is the path to which
- // the volume is being mounted inside the container. Value is the path of the
- // volume on disk
- Volumes map[string]string
- // Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
- // Easier than migrating older container configs :)
- VolumesRW map[string]bool
hostConfig *runconfig.HostConfig
+ command *execdriver.Command
- activeLinks map[string]*links.Link
monitor *containerMonitor
execCommands *execStore
+ daemon *Daemon
// logDriver for closing
- logDriver logger.Logger
- logCopier *logger.Copier
- AppliedVolumesFrom map[string]struct{}
+ logDriver logger.Logger
+ logCopier *logger.Copier
}
func (container *Container) FromDisk() error {
@@ -245,184 +219,6 @@ func (container *Container) GetRootResourcePath(path string) (string, error) {
return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root)
}
-func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.Device, err error) {
- device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
- // if there was no error, return the device
- if err == nil {
- device.Path = deviceMapping.PathInContainer
- return append(devs, device), nil
- }
-
- // if the device is not a device node
- // try to see if it's a directory holding many devices
- if err == devices.ErrNotADevice {
-
- // check if it is a directory
- if src, e := os.Stat(deviceMapping.PathOnHost); e == nil && src.IsDir() {
-
- // mount the internal devices recursively
- filepath.Walk(deviceMapping.PathOnHost, func(dpath string, f os.FileInfo, e error) error {
- childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions)
- if e != nil {
- // ignore the device
- return nil
- }
-
- // add the device to userSpecified devices
- childDevice.Path = strings.Replace(dpath, deviceMapping.PathOnHost, deviceMapping.PathInContainer, 1)
- devs = append(devs, childDevice)
-
- return nil
- })
- }
- }
-
- if len(devs) > 0 {
- return devs, nil
- }
-
- return devs, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
-}
-
-func populateCommand(c *Container, env []string) error {
- en := &execdriver.Network{
- Mtu: c.daemon.config.Mtu,
- Interface: nil,
- }
-
- parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
- switch parts[0] {
- case "none":
- case "host":
- en.HostNetworking = true
- case "bridge", "": // empty string to support existing containers
- if !c.Config.NetworkDisabled {
- network := c.NetworkSettings
- en.Interface = &execdriver.NetworkInterface{
- Gateway: network.Gateway,
- Bridge: network.Bridge,
- IPAddress: network.IPAddress,
- IPPrefixLen: network.IPPrefixLen,
- MacAddress: network.MacAddress,
- LinkLocalIPv6Address: network.LinkLocalIPv6Address,
- GlobalIPv6Address: network.GlobalIPv6Address,
- GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen,
- IPv6Gateway: network.IPv6Gateway,
- }
- }
- case "container":
- nc, err := c.getNetworkedContainer()
- if err != nil {
- return err
- }
- en.ContainerID = nc.ID
- default:
- return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
- }
-
- ipc := &execdriver.Ipc{}
-
- if c.hostConfig.IpcMode.IsContainer() {
- ic, err := c.getIpcContainer()
- if err != nil {
- return err
- }
- ipc.ContainerID = ic.ID
- } else {
- ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
- }
-
- pid := &execdriver.Pid{}
- pid.HostPid = c.hostConfig.PidMode.IsHost()
-
- // Build lists of devices allowed and created within the container.
- var userSpecifiedDevices []*configs.Device
- for _, deviceMapping := range c.hostConfig.Devices {
- devs, err := getDevicesFromPath(deviceMapping)
- if err != nil {
- return err
- }
-
- userSpecifiedDevices = append(userSpecifiedDevices, devs...)
- }
- allowedDevices := append(configs.DefaultAllowedDevices, userSpecifiedDevices...)
-
- autoCreatedDevices := append(configs.DefaultAutoCreatedDevices, userSpecifiedDevices...)
-
- // TODO: this can be removed after lxc-conf is fully deprecated
- lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig)
- if err != nil {
- return err
- }
-
- var rlimits []*ulimit.Rlimit
- ulimits := c.hostConfig.Ulimits
-
- // Merge ulimits with daemon defaults
- ulIdx := make(map[string]*ulimit.Ulimit)
- for _, ul := range ulimits {
- ulIdx[ul.Name] = ul
- }
- for name, ul := range c.daemon.config.Ulimits {
- if _, exists := ulIdx[name]; !exists {
- ulimits = append(ulimits, ul)
- }
- }
-
- for _, limit := range ulimits {
- rl, err := limit.GetRlimit()
- if err != nil {
- return err
- }
- rlimits = append(rlimits, rl)
- }
-
- resources := &execdriver.Resources{
- Memory: c.hostConfig.Memory,
- MemorySwap: c.hostConfig.MemorySwap,
- CpuShares: c.hostConfig.CpuShares,
- CpusetCpus: c.hostConfig.CpusetCpus,
- CpusetMems: c.hostConfig.CpusetMems,
- CpuQuota: c.hostConfig.CpuQuota,
- Rlimits: rlimits,
- }
-
- processConfig := execdriver.ProcessConfig{
- Privileged: c.hostConfig.Privileged,
- Entrypoint: c.Path,
- Arguments: c.Args,
- Tty: c.Config.Tty,
- User: c.Config.User,
- }
-
- processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
- processConfig.Env = env
-
- c.command = &execdriver.Command{
- ID: c.ID,
- Rootfs: c.RootfsPath(),
- ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
- InitPath: "/.dockerinit",
- WorkingDir: c.Config.WorkingDir,
- Network: en,
- Ipc: ipc,
- Pid: pid,
- Resources: resources,
- AllowedDevices: allowedDevices,
- AutoCreatedDevices: autoCreatedDevices,
- CapAdd: c.hostConfig.CapAdd,
- CapDrop: c.hostConfig.CapDrop,
- ProcessConfig: processConfig,
- ProcessLabel: c.GetProcessLabel(),
- MountLabel: c.GetMountLabel(),
- LxcConfig: lxcConfig,
- AppArmorProfile: c.AppArmorProfile,
- CgroupParent: c.hostConfig.CgroupParent,
- }
-
- return nil
-}
-
func (container *Container) Start() (err error) {
container.Lock()
defer container.Unlock()
@@ -449,22 +245,13 @@ func (container *Container) Start() (err error) {
}
}()
- if err := container.setupContainerDns(); err != nil {
- return err
- }
if err := container.Mount(); err != nil {
return err
}
if err := container.initializeNetworking(); err != nil {
return err
}
- if err := container.updateParentsHosts(); err != nil {
- return err
- }
container.verifyDaemonSettings()
- if err := container.prepareVolumes(); err != nil {
- return err
- }
linkedEnv, err := container.setupLinkedContainers()
if err != nil {
return err
@@ -476,10 +263,13 @@ func (container *Container) Start() (err error) {
if err := populateCommand(container, env); err != nil {
return err
}
- if err := container.setupMounts(); err != nil {
+
+ mounts, err := container.setupMounts()
+ if err != nil {
return err
}
+ container.command.Mounts = mounts
return container.waitForStart()
}
@@ -538,181 +328,16 @@ func (streamConfig *StreamConfig) StderrLogPipe() io.ReadCloser {
return ioutils.NewBufReader(reader)
}
-func (container *Container) buildHostnameFile() error {
- hostnamePath, err := container.GetRootResourcePath("hostname")
- if err != nil {
- return err
- }
- container.HostnamePath = hostnamePath
-
- if container.Config.Domainname != "" {
- return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644)
- }
- return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
-}
-
-func (container *Container) buildHostsFiles(IP string) error {
-
- hostsPath, err := container.GetRootResourcePath("hosts")
- if err != nil {
- return err
- }
- container.HostsPath = hostsPath
-
- var extraContent []etchosts.Record
-
- children, err := container.daemon.Children(container.Name)
- if err != nil {
- return err
- }
-
- for linkAlias, child := range children {
- _, alias := path.Split(linkAlias)
- // allow access to the linked container via the alias, real name, and container hostname
- aliasList := alias + " " + child.Config.Hostname
- // only add the name if alias isn't equal to the name
- if alias != child.Name[1:] {
- aliasList = aliasList + " " + child.Name[1:]
- }
- extraContent = append(extraContent, etchosts.Record{Hosts: aliasList, IP: child.NetworkSettings.IPAddress})
- }
-
- for _, extraHost := range container.hostConfig.ExtraHosts {
- // allow IPv6 addresses in extra hosts; only split on first ":"
- parts := strings.SplitN(extraHost, ":", 2)
- extraContent = append(extraContent, etchosts.Record{Hosts: parts[0], IP: parts[1]})
- }
-
- return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname, extraContent)
-}
-
-func (container *Container) buildHostnameAndHostsFiles(IP string) error {
- if err := container.buildHostnameFile(); err != nil {
- return err
- }
-
- return container.buildHostsFiles(IP)
-}
-
-func (container *Container) AllocateNetwork() error {
- mode := container.hostConfig.NetworkMode
- if container.Config.NetworkDisabled || !mode.IsPrivate() {
- return nil
- }
-
- var (
- err error
- eng = container.daemon.eng
- )
-
- networkSettings, err := bridge.Allocate(container.ID, container.Config.MacAddress, "", "")
- if err != nil {
- return err
- }
-
- // Error handling: At this point, the interface is allocated so we have to
- // make sure that it is always released in case of error, otherwise we
- // might leak resources.
-
- if container.Config.PortSpecs != nil {
- if err = migratePortMappings(container.Config, container.hostConfig); err != nil {
- bridge.Release(container.ID)
- return err
- }
- container.Config.PortSpecs = nil
- if err = container.WriteHostConfig(); err != nil {
- bridge.Release(container.ID)
- return err
- }
- }
-
- var (
- portSpecs = make(nat.PortSet)
- bindings = make(nat.PortMap)
- )
-
- if container.Config.ExposedPorts != nil {
- portSpecs = container.Config.ExposedPorts
- }
-
- if container.hostConfig.PortBindings != nil {
- for p, b := range container.hostConfig.PortBindings {
- bindings[p] = []nat.PortBinding{}
- for _, bb := range b {
- bindings[p] = append(bindings[p], nat.PortBinding{
- HostIp: bb.HostIp,
- HostPort: bb.HostPort,
- })
- }
- }
- }
-
- container.NetworkSettings.PortMapping = nil
-
- for port := range portSpecs {
- if err = container.allocatePort(eng, port, bindings); err != nil {
- bridge.Release(container.ID)
- return err
- }
- }
- container.WriteHostConfig()
-
- networkSettings.Ports = bindings
- container.NetworkSettings = networkSettings
-
- return nil
-}
-
-func (container *Container) ReleaseNetwork() {
- if container.Config.NetworkDisabled || !container.hostConfig.NetworkMode.IsPrivate() {
- return
- }
-
- bridge.Release(container.ID)
-
- container.NetworkSettings = &network.Settings{}
-}
-
func (container *Container) isNetworkAllocated() bool {
return container.NetworkSettings.IPAddress != ""
}
-func (container *Container) RestoreNetwork() error {
- mode := container.hostConfig.NetworkMode
- // Don't attempt a restore if we previously didn't allocate networking.
- // This might be a legacy container with no network allocated, in which case the
- // allocation will happen once and for all at start.
- if !container.isNetworkAllocated() || container.Config.NetworkDisabled || !mode.IsPrivate() {
- return nil
- }
-
- eng := container.daemon.eng
-
- // Re-allocate the interface with the same IP and MAC address.
- if _, err := bridge.Allocate(container.ID, container.NetworkSettings.MacAddress, container.NetworkSettings.IPAddress, ""); err != nil {
- return err
- }
-
- // Re-allocate any previously allocated ports.
- for port := range container.NetworkSettings.Ports {
- if err := container.allocatePort(eng, port, container.NetworkSettings.Ports); err != nil {
- return err
- }
- }
- return nil
-}
-
// cleanup releases any network resources allocated to the container along with any rules
// around how containers are linked together. It also unmounts the container's root filesystem.
func (container *Container) cleanup() {
container.ReleaseNetwork()
- // Disable all active links
- if container.activeLinks != nil {
- for _, link := range container.activeLinks {
- link.Disable()
- }
- }
+ disableAllActiveLinks(container)
if err := container.Unmount(); err != nil {
logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
@@ -721,6 +346,8 @@ func (container *Container) cleanup() {
for _, eConfig := range container.execCommands.s {
container.daemon.unregisterExecCommand(eConfig)
}
+
+ container.UnmountVolumes(false)
}
func (container *Container) KillSig(sig int) error {
@@ -762,23 +389,45 @@ func (container *Container) killPossiblyDeadProcess(sig int) error {
}
func (container *Container) Pause() error {
- if container.IsPaused() {
+ container.Lock()
+ defer container.Unlock()
+
+ // We cannot Pause the container which is already paused
+ if container.Paused {
return fmt.Errorf("Container %s is already paused", container.ID)
}
- if !container.IsRunning() {
+
+ // We cannot Pause the container which is not running
+ if !container.Running {
return fmt.Errorf("Container %s is not running", container.ID)
}
- return container.daemon.Pause(container)
+
+ if err := container.daemon.execDriver.Pause(container.command); err != nil {
+ return err
+ }
+ container.Paused = true
+ return nil
}
func (container *Container) Unpause() error {
- if !container.IsPaused() {
- return fmt.Errorf("Container %s is not paused", container.ID)
+ container.Lock()
+ defer container.Unlock()
+
+ // We cannot unpause the container which is not paused
+ if !container.Paused {
+		return fmt.Errorf("Container %s is not paused", container.ID)
}
- if !container.IsRunning() {
+
+ // We cannot unpause the container which is not running
+ if !container.Running {
return fmt.Errorf("Container %s is not running", container.ID)
}
- return container.daemon.Unpause(container)
+
+ if err := container.daemon.execDriver.Unpause(container.command); err != nil {
+ return err
+ }
+ container.Paused = false
+ return nil
}
func (container *Container) Kill() error {
@@ -792,17 +441,8 @@ func (container *Container) Kill() error {
}
// 2. Wait for the process to die, in last resort, try to kill the process directly
- if _, err := container.WaitStop(10 * time.Second); err != nil {
- // Ensure that we don't kill ourselves
- if pid := container.GetPid(); pid != 0 {
- logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
- if err := syscall.Kill(pid, 9); err != nil {
- if err != syscall.ESRCH {
- return err
- }
- logrus.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid)
- }
- }
+ if err := killProcessDirectly(container); err != nil {
+ return err
}
container.WaitStop(-1 * time.Second)
@@ -831,6 +471,7 @@ func (container *Container) Stop(seconds int) error {
return err
}
}
+
return nil
}
@@ -855,26 +496,6 @@ func (container *Container) Resize(h, w int) error {
return container.command.ProcessConfig.Terminal.Resize(h, w)
}
-func (container *Container) ExportRw() (archive.Archive, error) {
- if err := container.Mount(); err != nil {
- return nil, err
- }
- if container.daemon == nil {
- return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
- }
- archive, err := container.daemon.Diff(container)
- if err != nil {
- container.Unmount()
- return nil, err
- }
- return ioutils.NewReadCloserWrapper(archive, func() error {
- err := archive.Close()
- container.Unmount()
- return err
- }),
- nil
-}
-
func (container *Container) Export() (archive.Archive, error) {
if err := container.Mount(); err != nil {
return nil, err
@@ -918,18 +539,6 @@ func (container *Container) Unmount() error {
return container.daemon.Unmount(container)
}
-func (container *Container) logPath(name string) (string, error) {
- return container.GetRootResourcePath(fmt.Sprintf("%s-%s.log", container.ID, name))
-}
-
-func (container *Container) ReadLog(name string) (io.Reader, error) {
- pth, err := container.logPath(name)
- if err != nil {
- return nil, err
- }
- return os.Open(pth)
-}
-
func (container *Container) hostConfigPath() (string, error) {
return container.GetRootResourcePath("hostconfig.json")
}
@@ -951,37 +560,6 @@ func validateID(id string) error {
return nil
}
-// GetSize, return real size, virtual size
-func (container *Container) GetSize() (int64, int64) {
- var (
- sizeRw, sizeRootfs int64
- err error
- driver = container.daemon.driver
- )
-
- if err := container.Mount(); err != nil {
- logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
- return sizeRw, sizeRootfs
- }
- defer container.Unmount()
-
- initID := fmt.Sprintf("%s-init", container.ID)
- sizeRw, err = driver.DiffSize(container.ID, initID)
- if err != nil {
- logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
- // FIXME: GetSize should return an error. Not changing it now in case
- // there is a side-effect.
- sizeRw = -1
- }
-
- if _, err = os.Stat(container.basefs); err != nil {
- if sizeRootfs, err = directory.Size(container.basefs); err != nil {
- sizeRootfs = -1
- }
- }
- return sizeRw, sizeRootfs
-}
-
func (container *Container) Copy(resource string) (io.ReadCloser, error) {
container.Lock()
defer container.Unlock()
@@ -991,39 +569,42 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
}
defer func() {
if err != nil {
+ // unmount any volumes
+ container.UnmountVolumes(true)
+ // unmount the container's rootfs
container.Unmount()
}
}()
-
- if err = container.mountVolumes(); err != nil {
- container.unmountVolumes()
+ mounts, err := container.setupMounts()
+ if err != nil {
return nil, err
}
- defer func() {
+ for _, m := range mounts {
+ dest, err := container.GetResourcePath(m.Destination)
if err != nil {
- container.unmountVolumes()
+ return nil, err
}
- }()
-
+ if err := mount.Mount(m.Source, dest, "bind", "rbind,ro"); err != nil {
+ return nil, err
+ }
+ }
basePath, err := container.GetResourcePath(resource)
if err != nil {
return nil, err
}
-
stat, err := os.Stat(basePath)
if err != nil {
return nil, err
}
var filter []string
if !stat.IsDir() {
- d, f := path.Split(basePath)
+ d, f := filepath.Split(basePath)
basePath = d
filter = []string{f}
} else {
- filter = []string{path.Base(basePath)}
- basePath = path.Dir(basePath)
+ filter = []string{filepath.Base(basePath)}
+ basePath = filepath.Dir(basePath)
}
-
archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
Compression: archive.Uncompressed,
IncludeFiles: filter,
@@ -1031,10 +612,9 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
if err != nil {
return nil, err
}
-
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
- container.unmountVolumes()
+ container.UnmountVolumes(true)
container.Unmount()
return err
}),
@@ -1048,414 +628,53 @@ func (container *Container) Exposes(p nat.Port) bool {
}
func (container *Container) HostConfig() *runconfig.HostConfig {
- container.Lock()
- res := container.hostConfig
- container.Unlock()
- return res
+ return container.hostConfig
}
func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) {
- container.Lock()
container.hostConfig = hostConfig
- container.Unlock()
}
-func (container *Container) DisableLink(name string) {
- if container.activeLinks != nil {
- if link, exists := container.activeLinks[name]; exists {
- link.Disable()
- } else {
- logrus.Debugf("Could not find active link for %s", name)
- }
+func (container *Container) getLogConfig() runconfig.LogConfig {
+ cfg := container.hostConfig.LogConfig
+ if cfg.Type != "" { // container has log driver configured
+ return cfg
}
+ // Use daemon's default log config for containers
+ return container.daemon.defaultLogConfig
}
-func (container *Container) setupContainerDns() error {
- if container.ResolvConfPath != "" {
- // check if this is an existing container that needs DNS update:
- if container.UpdateDns {
- // read the host's resolv.conf, get the hash and call updateResolvConf
- logrus.Debugf("Check container (%s) for update to resolv.conf - UpdateDns flag was set", container.ID)
- latestResolvConf, latestHash := resolvconf.GetLastModified()
-
- // clean container resolv.conf re: localhost nameservers and IPv6 NS (if IPv6 disabled)
- updatedResolvConf, modified := resolvconf.FilterResolvDns(latestResolvConf, container.daemon.config.Bridge.EnableIPv6)
- if modified {
- // changes have occurred during resolv.conf localhost cleanup: generate an updated hash
- newHash, err := ioutils.HashData(bytes.NewReader(updatedResolvConf))
- if err != nil {
- return err
- }
- latestHash = newHash
- }
-
- if err := container.updateResolvConf(updatedResolvConf, latestHash); err != nil {
- return err
- }
- // successful update of the restarting container; set the flag off
- container.UpdateDns = false
- }
- return nil
- }
-
- var (
- config = container.hostConfig
- daemon = container.daemon
- )
-
- resolvConf, err := resolvconf.Get()
+func (container *Container) getLogger() (logger.Logger, error) {
+ cfg := container.getLogConfig()
+ c, err := logger.GetLogDriver(cfg.Type)
if err != nil {
- return err
+ return nil, fmt.Errorf("Failed to get logging factory: %v", err)
}
- container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf")
- if err != nil {
- return err
+ ctx := logger.Context{
+ Config: cfg.Config,
+ ContainerID: container.ID,
+ ContainerName: container.Name,
}
- if config.NetworkMode != "host" {
- // check configurations for any container/daemon dns settings
- if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 {
- var (
- dns = resolvconf.GetNameservers(resolvConf)
- dnsSearch = resolvconf.GetSearchDomains(resolvConf)
- )
- if len(config.Dns) > 0 {
- dns = config.Dns
- } else if len(daemon.config.Dns) > 0 {
- dns = daemon.config.Dns
- }
- if len(config.DnsSearch) > 0 {
- dnsSearch = config.DnsSearch
- } else if len(daemon.config.DnsSearch) > 0 {
- dnsSearch = daemon.config.DnsSearch
- }
- return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch)
- }
-
- // replace any localhost/127.*, and remove IPv6 nameservers if IPv6 disabled in daemon
- resolvConf, _ = resolvconf.FilterResolvDns(resolvConf, daemon.config.Bridge.EnableIPv6)
- }
- //get a sha256 hash of the resolv conf at this point so we can check
- //for changes when the host resolv.conf changes (e.g. network update)
- resolvHash, err := ioutils.HashData(bytes.NewReader(resolvConf))
- if err != nil {
- return err
- }
- resolvHashFile := container.ResolvConfPath + ".hash"
- if err = ioutil.WriteFile(resolvHashFile, []byte(resolvHash), 0644); err != nil {
- return err
- }
- return ioutil.WriteFile(container.ResolvConfPath, resolvConf, 0644)
-}
-
-// called when the host's resolv.conf changes to check whether container's resolv.conf
-// is unchanged by the container "user" since container start: if unchanged, the
-// container's resolv.conf will be updated to match the host's new resolv.conf
-func (container *Container) updateResolvConf(updatedResolvConf []byte, newResolvHash string) error {
-
- if container.ResolvConfPath == "" {
- return nil
- }
- if container.Running {
- //set a marker in the hostConfig to update on next start/restart
- container.UpdateDns = true
- return nil
- }
-
- resolvHashFile := container.ResolvConfPath + ".hash"
-
- //read the container's current resolv.conf and compute the hash
- resolvBytes, err := ioutil.ReadFile(container.ResolvConfPath)
- if err != nil {
- return err
- }
- curHash, err := ioutils.HashData(bytes.NewReader(resolvBytes))
- if err != nil {
- return err
- }
-
- //read the hash from the last time we wrote resolv.conf in the container
- hashBytes, err := ioutil.ReadFile(resolvHashFile)
- if err != nil {
- if !os.IsNotExist(err) {
- return err
- }
- // backwards compat: if no hash file exists, this container pre-existed from
- // a Docker daemon that didn't contain this update feature. Given we can't know
- // if the user has modified the resolv.conf since container start time, safer
- // to just never update the container's resolv.conf during it's lifetime which
- // we can control by setting hashBytes to an empty string
- hashBytes = []byte("")
- }
-
- //if the user has not modified the resolv.conf of the container since we wrote it last
- //we will replace it with the updated resolv.conf from the host
- if string(hashBytes) == curHash {
- logrus.Debugf("replacing %q with updated host resolv.conf", container.ResolvConfPath)
-
- // for atomic updates to these files, use temporary files with os.Rename:
- dir := path.Dir(container.ResolvConfPath)
- tmpHashFile, err := ioutil.TempFile(dir, "hash")
+ // Set logging file for "json-logger"
+ if cfg.Type == jsonfilelog.Name {
+ ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
if err != nil {
- return err
- }
- tmpResolvFile, err := ioutil.TempFile(dir, "resolv")
- if err != nil {
- return err
- }
-
- // write the updates to the temp files
- if err = ioutil.WriteFile(tmpHashFile.Name(), []byte(newResolvHash), 0644); err != nil {
- return err
- }
- if err = ioutil.WriteFile(tmpResolvFile.Name(), updatedResolvConf, 0644); err != nil {
- return err
- }
-
- // rename the temp files for atomic replace
- if err = os.Rename(tmpHashFile.Name(), resolvHashFile); err != nil {
- return err
- }
- return os.Rename(tmpResolvFile.Name(), container.ResolvConfPath)
- }
- return nil
-}
-
-func (container *Container) updateParentsHosts() error {
- refs := container.daemon.ContainerGraph().RefPaths(container.ID)
- for _, ref := range refs {
- if ref.ParentID == "0" {
- continue
- }
-
- c, err := container.daemon.Get(ref.ParentID)
- if err != nil {
- logrus.Error(err)
- }
-
- if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() {
- logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress)
- if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, ref.Name); err != nil {
- logrus.Errorf("Failed to update /etc/hosts in parent container %s for alias %s: %v", c.ID, ref.Name, err)
- }
+ return nil, err
}
}
- return nil
-}
-
-func (container *Container) initializeNetworking() error {
- var err error
- if container.hostConfig.NetworkMode.IsHost() {
- container.Config.Hostname, err = os.Hostname()
- if err != nil {
- return err
- }
-
- parts := strings.SplitN(container.Config.Hostname, ".", 2)
- if len(parts) > 1 {
- container.Config.Hostname = parts[0]
- container.Config.Domainname = parts[1]
- }
-
- content, err := ioutil.ReadFile("/etc/hosts")
- if os.IsNotExist(err) {
- return container.buildHostnameAndHostsFiles("")
- } else if err != nil {
- return err
- }
-
- if err := container.buildHostnameFile(); err != nil {
- return err
- }
-
- hostsPath, err := container.GetRootResourcePath("hosts")
- if err != nil {
- return err
- }
- container.HostsPath = hostsPath
-
- return ioutil.WriteFile(container.HostsPath, content, 0644)
- }
- if container.hostConfig.NetworkMode.IsContainer() {
- // we need to get the hosts files from the container to join
- nc, err := container.getNetworkedContainer()
- if err != nil {
- return err
- }
- container.HostnamePath = nc.HostnamePath
- container.HostsPath = nc.HostsPath
- container.ResolvConfPath = nc.ResolvConfPath
- container.Config.Hostname = nc.Config.Hostname
- container.Config.Domainname = nc.Config.Domainname
- return nil
- }
- if container.daemon.config.DisableNetwork {
- container.Config.NetworkDisabled = true
- return container.buildHostnameAndHostsFiles("127.0.1.1")
- }
- if err := container.AllocateNetwork(); err != nil {
- return err
- }
- return container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
-}
-
-// Make sure the config is compatible with the current kernel
-func (container *Container) verifyDaemonSettings() {
- if container.hostConfig.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
- logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.")
- container.hostConfig.Memory = 0
- }
- if container.hostConfig.Memory > 0 && container.hostConfig.MemorySwap != -1 && !container.daemon.sysInfo.SwapLimit {
- logrus.Warnf("Your kernel does not support swap limit capabilities. Limitation discarded.")
- container.hostConfig.MemorySwap = -1
- }
- if container.daemon.sysInfo.IPv4ForwardingDisabled {
- logrus.Warnf("IPv4 forwarding is disabled. Networking will not work")
- }
-}
-
-func (container *Container) setupLinkedContainers() ([]string, error) {
- var (
- env []string
- daemon = container.daemon
- )
- children, err := daemon.Children(container.Name)
- if err != nil {
- return nil, err
- }
-
- if len(children) > 0 {
- container.activeLinks = make(map[string]*links.Link, len(children))
-
- // If we encounter an error make sure that we rollback any network
- // config and iptables changes
- rollback := func() {
- for _, link := range container.activeLinks {
- link.Disable()
- }
- container.activeLinks = nil
- }
-
- for linkAlias, child := range children {
- if !child.IsRunning() {
- return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias)
- }
-
- link, err := links.NewLink(
- container.NetworkSettings.IPAddress,
- child.NetworkSettings.IPAddress,
- linkAlias,
- child.Config.Env,
- child.Config.ExposedPorts,
- )
-
- if err != nil {
- rollback()
- return nil, err
- }
-
- container.activeLinks[link.Alias()] = link
- if err := link.Enable(); err != nil {
- rollback()
- return nil, err
- }
-
- for _, envVar := range link.ToEnv() {
- env = append(env, envVar)
- }
- }
- }
- return env, nil
-}
-
-func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
- // if a domain name was specified, append it to the hostname (see #7851)
- fullHostname := container.Config.Hostname
- if container.Config.Domainname != "" {
- fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname)
- }
- // Setup environment
- env := []string{
- "PATH=" + DefaultPathEnv,
- "HOSTNAME=" + fullHostname,
- // Note: we don't set HOME here because it'll get autoset intelligently
- // based on the value of USER inside dockerinit, but only if it isn't
- // set already (ie, that can be overridden by setting HOME via -e or ENV
- // in a Dockerfile).
- }
- if container.Config.Tty {
- env = append(env, "TERM=xterm")
- }
- env = append(env, linkedEnv...)
- // because the env on the container can override certain default values
- // we need to replace the 'env' keys where they match and append anything
- // else.
- env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env)
-
- return env
-}
-
-func (container *Container) setupWorkingDirectory() error {
- if container.Config.WorkingDir != "" {
- container.Config.WorkingDir = path.Clean(container.Config.WorkingDir)
-
- pth, err := container.GetResourcePath(container.Config.WorkingDir)
- if err != nil {
- return err
- }
-
- pthInfo, err := os.Stat(pth)
- if err != nil {
- if !os.IsNotExist(err) {
- return err
- }
-
- if err := os.MkdirAll(pth, 0755); err != nil {
- return err
- }
- }
- if pthInfo != nil && !pthInfo.IsDir() {
- return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir)
- }
- }
- return nil
+ return c(ctx)
}
func (container *Container) startLogging() error {
- cfg := container.hostConfig.LogConfig
- if cfg.Type == "" {
- cfg = container.daemon.defaultLogConfig
+ cfg := container.getLogConfig()
+ if cfg.Type == "none" {
+ return nil // do not start logging routines
}
- var l logger.Logger
- switch cfg.Type {
- case "json-file":
- pth, err := container.logPath("json")
- if err != nil {
- return err
- }
- container.LogPath = pth
- dl, err := jsonfilelog.New(pth)
- if err != nil {
- return err
- }
- l = dl
- case "syslog":
- dl, err := syslog.New(container.ID[:12])
- if err != nil {
- return err
- }
- l = dl
- case "journald":
- dl, err := journald.New(container.ID[:12])
- if err != nil {
- return err
- }
- l = dl
- case "none":
- return nil
- default:
- return fmt.Errorf("Unknown logging driver: %s", cfg.Type)
+ l, err := container.getLogger()
+ if err != nil {
+ return fmt.Errorf("Failed to initialize logging driver: %v", err)
}
copier, err := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l)
@@ -1466,6 +685,11 @@ func (container *Container) startLogging() error {
copier.Run()
container.logDriver = l
+ // set LogPath field only for json-file logdriver
+ if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok {
+ container.LogPath = jl.LogPath()
+ }
+
return nil
}
@@ -1483,23 +707,6 @@ func (container *Container) waitForStart() error {
return nil
}
-func (container *Container) allocatePort(eng *engine.Engine, port nat.Port, bindings nat.PortMap) error {
- binding := bindings[port]
- if container.hostConfig.PublishAllPorts && len(binding) == 0 {
- binding = append(binding, nat.PortBinding{})
- }
-
- for i := 0; i < len(binding); i++ {
- b, err := bridge.AllocatePort(container.ID, port, binding[i])
- if err != nil {
- return err
- }
- binding[i] = b
- }
- bindings[port] = binding
- return nil
-}
-
func (container *Container) GetProcessLabel() string {
// even if we have a process label return "" if we are running
// in privileged mode
@@ -1516,41 +723,6 @@ func (container *Container) GetMountLabel() string {
return container.MountLabel
}
-func (container *Container) getIpcContainer() (*Container, error) {
- containerID := container.hostConfig.IpcMode.Container()
- c, err := container.daemon.Get(containerID)
- if err != nil {
- return nil, err
- }
- if !c.IsRunning() {
- return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID)
- }
- return c, nil
-}
-
-func (container *Container) getNetworkedContainer() (*Container, error) {
- parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2)
- switch parts[0] {
- case "container":
- if len(parts) != 2 {
- return nil, fmt.Errorf("no container specified to join network")
- }
- nc, err := container.daemon.Get(parts[1])
- if err != nil {
- return nil, err
- }
- if container == nc {
- return nil, fmt.Errorf("cannot join own network")
- }
- if !nc.IsRunning() {
- return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1])
- }
- return nc, nil
- default:
- return nil, fmt.Errorf("network mode not set to container")
- }
-}
-
func (container *Container) Stats() (*execdriver.ResourceStats, error) {
return container.daemon.Stats(container)
}
@@ -1563,3 +735,379 @@ func (c *Container) LogDriverType() string {
}
return c.hostConfig.LogConfig.Type
}
+
+func (container *Container) GetExecIDs() []string {
+ return container.execCommands.List()
+}
+
+func (container *Container) Exec(execConfig *execConfig) error {
+ container.Lock()
+ defer container.Unlock()
+
+ waitStart := make(chan struct{})
+
+ callback := func(processConfig *execdriver.ProcessConfig, pid int) {
+ if processConfig.Tty {
+ // The callback is called after the process Start()
+ // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
+ // which we close here.
+ if c, ok := processConfig.Stdout.(io.Closer); ok {
+ c.Close()
+ }
+ }
+ close(waitStart)
+ }
+
+	// We use a callback here instead of a goroutine and a channel for
+	// synchronization purposes
+ cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) })
+
+ // Exec should not return until the process is actually running
+ select {
+ case <-waitStart:
+ case err := <-cErr:
+ return err
+ }
+
+ return nil
+}
+
+func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error {
+ var (
+ err error
+ exitCode int
+ )
+
+ pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin)
+ exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback)
+ if err != nil {
+ logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
+ }
+
+ logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
+ if execConfig.OpenStdin {
+ if err := execConfig.StreamConfig.stdin.Close(); err != nil {
+ logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
+ }
+ }
+ if err := execConfig.StreamConfig.stdout.Clean(); err != nil {
+ logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
+ }
+ if err := execConfig.StreamConfig.stderr.Clean(); err != nil {
+ logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
+ }
+ if execConfig.ProcessConfig.Terminal != nil {
+ if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
+ logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
+ }
+ }
+
+ return err
+}
+
+func (c *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
+ return attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, stdin, stdout, stderr)
+}
+
+func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
+ if logs {
+		logDriver, err := c.getLogger()
+		var cLog io.Reader
+		if err == nil { cLog, err = logDriver.GetReader() } // only dereference logDriver if getLogger succeeded
+ if err != nil {
+ logrus.Errorf("Error reading logs: %s", err)
+ } else if c.LogDriverType() != jsonfilelog.Name {
+ logrus.Errorf("Reading logs not implemented for driver %s", c.LogDriverType())
+ } else {
+ dec := json.NewDecoder(cLog)
+ for {
+ l := &jsonlog.JSONLog{}
+
+ if err := dec.Decode(l); err == io.EOF {
+ break
+ } else if err != nil {
+ logrus.Errorf("Error streaming logs: %s", err)
+ break
+ }
+ if l.Stream == "stdout" && stdout != nil {
+ io.WriteString(stdout, l.Log)
+ }
+ if l.Stream == "stderr" && stderr != nil {
+ io.WriteString(stderr, l.Log)
+ }
+ }
+ }
+ }
+
+ //stream
+ if stream {
+ var stdinPipe io.ReadCloser
+ if stdin != nil {
+ r, w := io.Pipe()
+ go func() {
+ defer w.Close()
+ defer logrus.Debugf("Closing buffered stdin pipe")
+ io.Copy(w, stdin)
+ }()
+ stdinPipe = r
+ }
+ <-c.Attach(stdinPipe, stdout, stderr)
+ // If we are in stdinonce mode, wait for the process to end
+ // otherwise, simply return
+ if c.Config.StdinOnce && !c.Config.Tty {
+ c.WaitStop(-1 * time.Second)
+ }
+ }
+ return nil
+}
+
+func attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
+ var (
+ cStdout, cStderr io.ReadCloser
+ cStdin io.WriteCloser
+ wg sync.WaitGroup
+ errors = make(chan error, 3)
+ )
+
+ if stdin != nil && openStdin {
+ cStdin = streamConfig.StdinPipe()
+ wg.Add(1)
+ }
+
+ if stdout != nil {
+ cStdout = streamConfig.StdoutPipe()
+ wg.Add(1)
+ }
+
+ if stderr != nil {
+ cStderr = streamConfig.StderrPipe()
+ wg.Add(1)
+ }
+
+ // Connect stdin of container to the http conn.
+ go func() {
+ if stdin == nil || !openStdin {
+ return
+ }
+ logrus.Debugf("attach: stdin: begin")
+ defer func() {
+ if stdinOnce && !tty {
+ cStdin.Close()
+ } else {
+ // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
+ if cStdout != nil {
+ cStdout.Close()
+ }
+ if cStderr != nil {
+ cStderr.Close()
+ }
+ }
+ wg.Done()
+ logrus.Debugf("attach: stdin: end")
+ }()
+
+ var err error
+ if tty {
+ _, err = copyEscapable(cStdin, stdin)
+ } else {
+ _, err = io.Copy(cStdin, stdin)
+
+ }
+ if err == io.ErrClosedPipe {
+ err = nil
+ }
+ if err != nil {
+ logrus.Errorf("attach: stdin: %s", err)
+ errors <- err
+ return
+ }
+ }()
+
+ attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) {
+ if stream == nil {
+ return
+ }
+ defer func() {
+ // Make sure stdin gets closed
+ if stdin != nil {
+ stdin.Close()
+ }
+ streamPipe.Close()
+ wg.Done()
+ logrus.Debugf("attach: %s: end", name)
+ }()
+
+ logrus.Debugf("attach: %s: begin", name)
+ _, err := io.Copy(stream, streamPipe)
+ if err == io.ErrClosedPipe {
+ err = nil
+ }
+ if err != nil {
+ logrus.Errorf("attach: %s: %v", name, err)
+ errors <- err
+ }
+ }
+
+ go attachStream("stdout", stdout, cStdout)
+ go attachStream("stderr", stderr, cStderr)
+
+ return promise.Go(func() error {
+ wg.Wait()
+ close(errors)
+ for err := range errors {
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
+
+// copyEscapable is copied from io.Copy(), modified to handle the detach escape sequence.
+func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
+ buf := make([]byte, 32*1024)
+ for {
+ nr, er := src.Read(buf)
+ if nr > 0 {
+ // ---- Docker addition
+ // char 16 is C-p
+ if nr == 1 && buf[0] == 16 {
+ nr, er = src.Read(buf)
+ // char 17 is C-q
+ if nr == 1 && buf[0] == 17 {
+ if err := src.Close(); err != nil {
+ return 0, err
+ }
+ return 0, nil
+ }
+ }
+			// ---- End of Docker addition
+ nw, ew := dst.Write(buf[0:nr])
+ if nw > 0 {
+ written += int64(nw)
+ }
+ if ew != nil {
+ err = ew
+ break
+ }
+ if nr != nw {
+ err = io.ErrShortWrite
+ break
+ }
+ }
+ if er == io.EOF {
+ break
+ }
+ if er != nil {
+ err = er
+ break
+ }
+ }
+ return written, err
+}
+
+func (container *Container) networkMounts() []execdriver.Mount {
+ var mounts []execdriver.Mount
+ if container.ResolvConfPath != "" {
+ mounts = append(mounts, execdriver.Mount{
+ Source: container.ResolvConfPath,
+ Destination: "/etc/resolv.conf",
+ Writable: !container.hostConfig.ReadonlyRootfs,
+ Private: true,
+ })
+ }
+ if container.HostnamePath != "" {
+ mounts = append(mounts, execdriver.Mount{
+ Source: container.HostnamePath,
+ Destination: "/etc/hostname",
+ Writable: !container.hostConfig.ReadonlyRootfs,
+ Private: true,
+ })
+ }
+ if container.HostsPath != "" {
+ mounts = append(mounts, execdriver.Mount{
+ Source: container.HostsPath,
+ Destination: "/etc/hosts",
+ Writable: !container.hostConfig.ReadonlyRootfs,
+ Private: true,
+ })
+ }
+ return mounts
+}
+
+func (container *Container) addLocalMountPoint(name, destination string, rw bool) {
+ container.MountPoints[destination] = &mountPoint{
+ Name: name,
+ Driver: volume.DefaultDriverName,
+ Destination: destination,
+ RW: rw,
+ }
+}
+
+func (container *Container) addMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
+ container.MountPoints[destination] = &mountPoint{
+ Name: vol.Name(),
+ Driver: vol.DriverName(),
+ Destination: destination,
+ RW: rw,
+ Volume: vol,
+ }
+}
+
+func (container *Container) isDestinationMounted(destination string) bool {
+ return container.MountPoints[destination] != nil
+}
+
+func (container *Container) prepareMountPoints() error {
+ for _, config := range container.MountPoints {
+ if len(config.Driver) > 0 {
+ v, err := createVolume(config.Name, config.Driver)
+ if err != nil {
+ return err
+ }
+ config.Volume = v
+ }
+ }
+ return nil
+}
+
+func (container *Container) removeMountPoints() error {
+ for _, m := range container.MountPoints {
+ if m.Volume != nil {
+ if err := removeVolume(m.Volume); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (container *Container) shouldRestart() bool {
+ return container.hostConfig.RestartPolicy.Name == "always" ||
+ (container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0)
+}
+
+func (container *Container) copyImagePathContent(v volume.Volume, destination string) error {
+ rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs)
+ if err != nil {
+ return err
+ }
+
+ if _, err = ioutil.ReadDir(rootfs); err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ path, err := v.Mount()
+ if err != nil {
+ return err
+ }
+
+ if err := copyExistingContents(rootfs, path); err != nil {
+ return err
+ }
+
+ return v.Unmount()
+}
diff --git a/daemon/container_linux.go b/daemon/container_linux.go
new file mode 100644
index 0000000000..3e50ed22fe
--- /dev/null
+++ b/daemon/container_linux.go
@@ -0,0 +1,979 @@
+// +build linux
+
+package daemon
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/daemon/execdriver"
+ "github.com/docker/docker/daemon/network"
+ "github.com/docker/docker/links"
+ "github.com/docker/docker/nat"
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/directory"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/docker/docker/pkg/ulimit"
+ "github.com/docker/docker/runconfig"
+ "github.com/docker/docker/utils"
+ "github.com/docker/libcontainer/configs"
+ "github.com/docker/libcontainer/devices"
+ "github.com/docker/libnetwork"
+ "github.com/docker/libnetwork/netlabel"
+ "github.com/docker/libnetwork/options"
+ "github.com/docker/libnetwork/types"
+)
+
+const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+// Container is the Linux-specific container type, embedding the
+// platform-independent CommonContainer state.
+type Container struct {
+	CommonContainer
+
+	// Fields below here are platform specific.
+
+	// AppArmorProfile names the AppArmor profile applied to the container.
+	AppArmorProfile string
+	// activeLinks tracks currently-enabled links keyed by alias.
+	activeLinks map[string]*links.Link
+}
+
+// killProcessDirectly waits up to 10 seconds for the container to stop
+// and, if it has not, sends SIGKILL (signal 9) to its init process.
+// A missing process (ESRCH) is not treated as an error.
+func killProcessDirectly(container *Container) error {
+	if _, err := container.WaitStop(10 * time.Second); err != nil {
+		// Ensure that we don't kill ourselves
+		if pid := container.GetPid(); pid != 0 {
+			logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
+			if err := syscall.Kill(pid, 9); err != nil {
+				if err != syscall.ESRCH {
+					return err
+				}
+				logrus.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid)
+			}
+		}
+	}
+	return nil
+}
+
+// setupLinkedContainers enables links to all child containers and
+// returns the environment variables those links contribute. On any
+// failure it rolls back every link enabled so far.
+func (container *Container) setupLinkedContainers() ([]string, error) {
+	var (
+		env    []string
+		daemon = container.daemon
+	)
+	children, err := daemon.Children(container.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(children) > 0 {
+		container.activeLinks = make(map[string]*links.Link, len(children))
+
+		// If we encounter an error make sure that we rollback any network
+		// config and iptables changes
+		rollback := func() {
+			for _, link := range container.activeLinks {
+				link.Disable()
+			}
+			container.activeLinks = nil
+		}
+
+		for linkAlias, child := range children {
+			if !child.IsRunning() {
+				return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias)
+			}
+
+			link, err := links.NewLink(
+				container.NetworkSettings.IPAddress,
+				child.NetworkSettings.IPAddress,
+				linkAlias,
+				child.Config.Env,
+				child.Config.ExposedPorts,
+			)
+
+			if err != nil {
+				rollback()
+				return nil, err
+			}
+
+			container.activeLinks[link.Alias()] = link
+			if err := link.Enable(); err != nil {
+				rollback()
+				return nil, err
+			}
+
+			// Each link exports env vars (e.g. <ALIAS>_PORT_...) for
+			// the container's process environment.
+			for _, envVar := range link.ToEnv() {
+				env = append(env, envVar)
+			}
+		}
+	}
+	return env, nil
+}
+
+// createDaemonEnvironment builds the process environment for the
+// container: default PATH and HOSTNAME, optional TERM, the link-derived
+// variables, and finally the user-configured env, which overrides any
+// matching defaults.
+func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
+	// if a domain name was specified, append it to the hostname (see #7851)
+	fullHostname := container.Config.Hostname
+	if container.Config.Domainname != "" {
+		fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname)
+	}
+	// Setup environment
+	env := []string{
+		"PATH=" + DefaultPathEnv,
+		"HOSTNAME=" + fullHostname,
+		// Note: we don't set HOME here because it'll get autoset intelligently
+		// based on the value of USER inside dockerinit, but only if it isn't
+		// set already (ie, that can be overridden by setting HOME via -e or ENV
+		// in a Dockerfile).
+	}
+	if container.Config.Tty {
+		env = append(env, "TERM=xterm")
+	}
+	env = append(env, linkedEnv...)
+	// because the env on the container can override certain default values
+	// we need to replace the 'env' keys where they match and append anything
+	// else.
+	env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env)
+
+	return env
+}
+
+// getDevicesFromPath resolves a host device mapping into one or more
+// libcontainer device configs. A plain device node yields one entry; a
+// directory is walked recursively and every device node inside it is
+// added, with the host path prefix rewritten to the in-container path.
+func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.Device, err error) {
+	device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
+	// if there was no error, return the device
+	if err == nil {
+		device.Path = deviceMapping.PathInContainer
+		return append(devs, device), nil
+	}
+
+	// if the device is not a device node
+	// try to see if it's a directory holding many devices
+	if err == devices.ErrNotADevice {
+
+		// check if it is a directory
+		if src, e := os.Stat(deviceMapping.PathOnHost); e == nil && src.IsDir() {
+
+			// mount the internal devices recursively
+			// (the Walk error is ignored: the root was just stat'ed and
+			// per-entry failures are deliberately skipped below)
+			filepath.Walk(deviceMapping.PathOnHost, func(dpath string, f os.FileInfo, e error) error {
+				childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions)
+				if e != nil {
+					// ignore the device
+					return nil
+				}
+
+				// add the device to userSpecified devices
+				childDevice.Path = strings.Replace(dpath, deviceMapping.PathOnHost, deviceMapping.PathInContainer, 1)
+				devs = append(devs, childDevice)
+
+				return nil
+			})
+		}
+	}
+
+	if len(devs) > 0 {
+		return devs, nil
+	}
+
+	// Neither a device node nor a directory containing any: report the
+	// original lookup error with the offending host path.
+	return devs, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
+}
+
+// populateCommand fills c.command with the execdriver.Command that the
+// exec driver will run: network/ipc/pid/uts namespaces, devices,
+// rlimits, cgroup resources and the process configuration. env is the
+// fully-resolved process environment.
+func populateCommand(c *Container, env []string) error {
+	var en *execdriver.Network
+	if !c.daemon.config.DisableNetwork {
+		en = &execdriver.Network{
+			NamespacePath: c.NetworkSettings.SandboxKey,
+		}
+
+		// "container:<id>" mode shares the network namespace of another
+		// container; record its ID for the driver.
+		parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
+		if parts[0] == "container" {
+			nc, err := c.getNetworkedContainer()
+			if err != nil {
+				return err
+			}
+			en.ContainerID = nc.ID
+		}
+	}
+
+	ipc := &execdriver.Ipc{}
+
+	if c.hostConfig.IpcMode.IsContainer() {
+		ic, err := c.getIpcContainer()
+		if err != nil {
+			return err
+		}
+		ipc.ContainerID = ic.ID
+	} else {
+		ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
+	}
+
+	pid := &execdriver.Pid{}
+	pid.HostPid = c.hostConfig.PidMode.IsHost()
+
+	uts := &execdriver.UTS{
+		HostUTS: c.hostConfig.UTSMode.IsHost(),
+	}
+
+	// Build lists of devices allowed and created within the container.
+	var userSpecifiedDevices []*configs.Device
+	for _, deviceMapping := range c.hostConfig.Devices {
+		devs, err := getDevicesFromPath(deviceMapping)
+		if err != nil {
+			return err
+		}
+
+		userSpecifiedDevices = append(userSpecifiedDevices, devs...)
+	}
+	allowedDevices := append(configs.DefaultAllowedDevices, userSpecifiedDevices...)
+
+	autoCreatedDevices := append(configs.DefaultAutoCreatedDevices, userSpecifiedDevices...)
+
+	// TODO: this can be removed after lxc-conf is fully deprecated
+	lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig)
+	if err != nil {
+		return err
+	}
+
+	var rlimits []*ulimit.Rlimit
+	ulimits := c.hostConfig.Ulimits
+
+	// Merge ulimits with daemon defaults: container-specified ulimits
+	// win; daemon defaults fill in only names not already present.
+	ulIdx := make(map[string]*ulimit.Ulimit)
+	for _, ul := range ulimits {
+		ulIdx[ul.Name] = ul
+	}
+	for name, ul := range c.daemon.config.Ulimits {
+		if _, exists := ulIdx[name]; !exists {
+			ulimits = append(ulimits, ul)
+		}
+	}
+
+	for _, limit := range ulimits {
+		rl, err := limit.GetRlimit()
+		if err != nil {
+			return err
+		}
+		rlimits = append(rlimits, rl)
+	}
+
+	resources := &execdriver.Resources{
+		Memory:         c.hostConfig.Memory,
+		MemorySwap:     c.hostConfig.MemorySwap,
+		CpuShares:      c.hostConfig.CpuShares,
+		CpusetCpus:     c.hostConfig.CpusetCpus,
+		CpusetMems:     c.hostConfig.CpusetMems,
+		CpuPeriod:      c.hostConfig.CpuPeriod,
+		CpuQuota:       c.hostConfig.CpuQuota,
+		BlkioWeight:    c.hostConfig.BlkioWeight,
+		Rlimits:        rlimits,
+		OomKillDisable: c.hostConfig.OomKillDisable,
+	}
+
+	processConfig := execdriver.ProcessConfig{
+		Privileged: c.hostConfig.Privileged,
+		Entrypoint: c.Path,
+		Arguments:  c.Args,
+		Tty:        c.Config.Tty,
+		User:       c.Config.User,
+	}
+
+	// Setsid so the container process leads its own session.
+	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
+	processConfig.Env = env
+
+	c.command = &execdriver.Command{
+		ID:                 c.ID,
+		Rootfs:             c.RootfsPath(),
+		ReadonlyRootfs:     c.hostConfig.ReadonlyRootfs,
+		InitPath:           "/.dockerinit",
+		WorkingDir:         c.Config.WorkingDir,
+		Network:            en,
+		Ipc:                ipc,
+		Pid:                pid,
+		UTS:                uts,
+		Resources:          resources,
+		AllowedDevices:     allowedDevices,
+		AutoCreatedDevices: autoCreatedDevices,
+		CapAdd:             c.hostConfig.CapAdd,
+		CapDrop:            c.hostConfig.CapDrop,
+		ProcessConfig:      processConfig,
+		ProcessLabel:       c.GetProcessLabel(),
+		MountLabel:         c.GetMountLabel(),
+		LxcConfig:          lxcConfig,
+		AppArmorProfile:    c.AppArmorProfile,
+		CgroupParent:       c.hostConfig.CgroupParent,
+	}
+
+	return nil
+}
+
+// GetSize, return real size, virtual size
+// On failure of a sub-step the corresponding size is reported as -1
+// (mount failure returns 0, 0); errors are logged, not returned.
+func (container *Container) GetSize() (int64, int64) {
+	var (
+		sizeRw, sizeRootfs int64
+		err                error
+		driver             = container.daemon.driver
+	)
+
+	if err := container.Mount(); err != nil {
+		logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
+		return sizeRw, sizeRootfs
+	}
+	defer container.Unmount()
+
+	// The rw size is the diff against the container's init layer.
+	initID := fmt.Sprintf("%s-init", container.ID)
+	sizeRw, err = driver.DiffSize(container.ID, initID)
+	if err != nil {
+		logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
+		// FIXME: GetSize should return an error. Not changing it now in case
+		// there is a side-effect.
+		sizeRw = -1
+	}
+
+	if _, err = os.Stat(container.basefs); err == nil {
+		if sizeRootfs, err = directory.Size(container.basefs); err != nil {
+			sizeRootfs = -1
+		}
+	}
+	return sizeRw, sizeRootfs
+}
+
+// buildHostnameFile writes the container's hostname (with domain
+// appended when set) to the per-container "hostname" file and records
+// its path on the container.
+func (container *Container) buildHostnameFile() error {
+	hostnamePath, err := container.GetRootResourcePath("hostname")
+	if err != nil {
+		return err
+	}
+	container.HostnamePath = hostnamePath
+
+	if container.Config.Domainname != "" {
+		return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644)
+	}
+	return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
+}
+
+// buildJoinOptions assembles the libnetwork endpoint-join options for
+// this container: hostname/domain, hosts and resolv.conf paths, DNS
+// settings (container overrides daemon defaults), extra hosts for links
+// and secondary addresses, and parent/child endpoint bookkeeping.
+func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, error) {
+	var (
+		joinOptions []libnetwork.EndpointOption
+		err         error
+		dns         []string
+		dnsSearch   []string
+	)
+
+	joinOptions = append(joinOptions, libnetwork.JoinOptionHostname(container.Config.Hostname),
+		libnetwork.JoinOptionDomainname(container.Config.Domainname))
+
+	if container.hostConfig.NetworkMode.IsHost() {
+		joinOptions = append(joinOptions, libnetwork.JoinOptionUseDefaultSandbox())
+	}
+
+	container.HostsPath, err = container.GetRootResourcePath("hosts")
+	if err != nil {
+		return nil, err
+	}
+	joinOptions = append(joinOptions, libnetwork.JoinOptionHostsPath(container.HostsPath))
+
+	container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf")
+	if err != nil {
+		return nil, err
+	}
+	joinOptions = append(joinOptions, libnetwork.JoinOptionResolvConfPath(container.ResolvConfPath))
+
+	// Container-level DNS settings take precedence over daemon-level.
+	if len(container.hostConfig.Dns) > 0 {
+		dns = container.hostConfig.Dns
+	} else if len(container.daemon.config.Dns) > 0 {
+		dns = container.daemon.config.Dns
+	}
+
+	for _, d := range dns {
+		joinOptions = append(joinOptions, libnetwork.JoinOptionDNS(d))
+	}
+
+	if len(container.hostConfig.DnsSearch) > 0 {
+		dnsSearch = container.hostConfig.DnsSearch
+	} else if len(container.daemon.config.DnsSearch) > 0 {
+		dnsSearch = container.daemon.config.DnsSearch
+	}
+
+	for _, ds := range dnsSearch {
+		joinOptions = append(joinOptions, libnetwork.JoinOptionDNSSearch(ds))
+	}
+
+	// Map the container's own (fully-qualified) name to each secondary
+	// IP address in /etc/hosts.
+	if container.NetworkSettings.SecondaryIPAddresses != nil {
+		name := container.Config.Hostname
+		if container.Config.Domainname != "" {
+			name = name + "." + container.Config.Domainname
+		}
+
+		for _, a := range container.NetworkSettings.SecondaryIPAddresses {
+			joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(name, a.Addr))
+		}
+	}
+
+	var childEndpoints, parentEndpoints []string
+
+	children, err := container.daemon.Children(container.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	for linkAlias, child := range children {
+		_, alias := path.Split(linkAlias)
+		// allow access to the linked container via the alias, real name, and container hostname
+		aliasList := alias + " " + child.Config.Hostname
+		// only add the name if alias isn't equal to the name
+		if alias != child.Name[1:] {
+			aliasList = aliasList + " " + child.Name[1:]
+		}
+		joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(aliasList, child.NetworkSettings.IPAddress))
+		if child.NetworkSettings.EndpointID != "" {
+			childEndpoints = append(childEndpoints, child.NetworkSettings.EndpointID)
+		}
+	}
+
+	for _, extraHost := range container.hostConfig.ExtraHosts {
+		// allow IPv6 addresses in extra hosts; only split on first ":"
+		// NOTE(review): parts[1] assumes every extra host contains a
+		// colon — presumably validated at container create; confirm.
+		parts := strings.SplitN(extraHost, ":", 2)
+		joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(parts[0], parts[1]))
+	}
+
+	// Propagate this container's address to the /etc/hosts of every
+	// parent that links to it.
+	refs := container.daemon.ContainerGraph().RefPaths(container.ID)
+	for _, ref := range refs {
+		if ref.ParentID == "0" {
+			continue
+		}
+
+		c, err := container.daemon.Get(ref.ParentID)
+		if err != nil {
+			logrus.Error(err)
+		}
+
+		if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() {
+			logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress)
+			joinOptions = append(joinOptions, libnetwork.JoinOptionParentUpdate(c.NetworkSettings.EndpointID, ref.Name, container.NetworkSettings.IPAddress))
+			if c.NetworkSettings.EndpointID != "" {
+				parentEndpoints = append(parentEndpoints, c.NetworkSettings.EndpointID)
+			}
+		}
+	}
+
+	linkOptions := options.Generic{
+		netlabel.GenericData: options.Generic{
+			"ParentEndpoints": parentEndpoints,
+			"ChildEndpoints":  childEndpoints,
+		},
+	}
+
+	joinOptions = append(joinOptions, libnetwork.JoinOptionGeneric(linkOptions))
+
+	return joinOptions, nil
+}
+
+// buildPortMapInfo copies MAC address and port-mapping information from
+// the endpoint's driver info into networkSettings. A nil driver info is
+// not an error; the settings are returned unchanged.
+func (container *Container) buildPortMapInfo(n libnetwork.Network, ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) {
+	if ep == nil {
+		return nil, fmt.Errorf("invalid endpoint while building port map info")
+	}
+
+	if networkSettings == nil {
+		return nil, fmt.Errorf("invalid networksettings while building port map info")
+	}
+
+	driverInfo, err := ep.DriverInfo()
+	if err != nil {
+		return nil, err
+	}
+
+	if driverInfo == nil {
+		// It is not an error for epInfo to be nil
+		return networkSettings, nil
+	}
+
+	// NOTE(review): assumes the driver stores a net.HardwareAddr under
+	// netlabel.MacAddress — the assertion panics otherwise; confirm.
+	if mac, ok := driverInfo[netlabel.MacAddress]; ok {
+		networkSettings.MacAddress = mac.(net.HardwareAddr).String()
+	}
+
+	mapData, ok := driverInfo[netlabel.PortMap]
+	if !ok {
+		return networkSettings, nil
+	}
+
+	if portMapping, ok := mapData.([]types.PortBinding); ok {
+		networkSettings.Ports = nat.PortMap{}
+		for _, pp := range portMapping {
+			natPort := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port)))
+			natBndg := nat.PortBinding{HostIp: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
+			networkSettings.Ports[natPort] = append(networkSettings.Ports[natPort], natBndg)
+		}
+	}
+
+	return networkSettings, nil
+}
+
+// buildEndpointInfo copies interface addresses from the endpoint into
+// networkSettings: the first interface provides the primary IPv4/IPv6
+// addresses, any further interfaces become secondary addresses.
+func (container *Container) buildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) {
+	if ep == nil {
+		return nil, fmt.Errorf("invalid endpoint while building port map info")
+	}
+
+	if networkSettings == nil {
+		return nil, fmt.Errorf("invalid networksettings while building port map info")
+	}
+
+	epInfo := ep.Info()
+	if epInfo == nil {
+		// It is not an error to get an empty endpoint info
+		return networkSettings, nil
+	}
+
+	ifaceList := epInfo.InterfaceList()
+	if len(ifaceList) == 0 {
+		return networkSettings, nil
+	}
+
+	iface := ifaceList[0]
+
+	ones, _ := iface.Address().Mask.Size()
+	networkSettings.IPAddress = iface.Address().IP.String()
+	networkSettings.IPPrefixLen = ones
+
+	if iface.AddressIPv6().IP.To16() != nil {
+		onesv6, _ := iface.AddressIPv6().Mask.Size()
+		networkSettings.GlobalIPv6Address = iface.AddressIPv6().IP.String()
+		networkSettings.GlobalIPv6PrefixLen = onesv6
+	}
+
+	if len(ifaceList) == 1 {
+		return networkSettings, nil
+	}
+
+	// Remaining interfaces are recorded as secondary addresses.
+	networkSettings.SecondaryIPAddresses = make([]network.Address, 0, len(ifaceList)-1)
+	networkSettings.SecondaryIPv6Addresses = make([]network.Address, 0, len(ifaceList)-1)
+	for _, iface := range ifaceList[1:] {
+		ones, _ := iface.Address().Mask.Size()
+		addr := network.Address{Addr: iface.Address().IP.String(), PrefixLen: ones}
+		networkSettings.SecondaryIPAddresses = append(networkSettings.SecondaryIPAddresses, addr)
+
+		if iface.AddressIPv6().IP.To16() != nil {
+			onesv6, _ := iface.AddressIPv6().Mask.Size()
+			addrv6 := network.Address{Addr: iface.AddressIPv6().IP.String(), PrefixLen: onesv6}
+			networkSettings.SecondaryIPv6Addresses = append(networkSettings.SecondaryIPv6Addresses, addrv6)
+		}
+	}
+
+	return networkSettings, nil
+}
+
+// updateJoinInfo records the endpoint's gateway addresses and sandbox
+// key on the container's network settings after a join.
+func (container *Container) updateJoinInfo(ep libnetwork.Endpoint) error {
+	epInfo := ep.Info()
+	if epInfo == nil {
+		// It is not an error to get an empty endpoint info
+		return nil
+	}
+
+	container.NetworkSettings.Gateway = epInfo.Gateway().String()
+	if epInfo.GatewayIPv6().To16() != nil {
+		container.NetworkSettings.IPv6Gateway = epInfo.GatewayIPv6().String()
+	}
+
+	container.NetworkSettings.SandboxKey = epInfo.SandboxKey()
+
+	return nil
+}
+
+// updateNetworkSettings rebuilds container.NetworkSettings from the
+// given network and endpoint: port mappings, interface addresses, and
+// the bridge name when running in bridge mode.
+func (container *Container) updateNetworkSettings(n libnetwork.Network, ep libnetwork.Endpoint) error {
+	networkSettings := &network.Settings{NetworkID: n.ID(), EndpointID: ep.ID()}
+
+	networkSettings, err := container.buildPortMapInfo(n, ep, networkSettings)
+	if err != nil {
+		return err
+	}
+
+	networkSettings, err = container.buildEndpointInfo(n, ep, networkSettings)
+	if err != nil {
+		return err
+	}
+
+	if container.hostConfig.NetworkMode == runconfig.NetworkMode("bridge") {
+		networkSettings.Bridge = container.daemon.config.Bridge.Iface
+	}
+
+	container.NetworkSettings = networkSettings
+	return nil
+}
+
+// UpdateNetwork re-joins the container's existing endpoint with freshly
+// built join options (leave, then join again) and refreshes the join
+// info. Used when link state changes require the endpoint to be updated.
+func (container *Container) UpdateNetwork() error {
+	n, err := container.daemon.netController.NetworkByID(container.NetworkSettings.NetworkID)
+	if err != nil {
+		return fmt.Errorf("error locating network id %s: %v", container.NetworkSettings.NetworkID, err)
+	}
+
+	ep, err := n.EndpointByID(container.NetworkSettings.EndpointID)
+	if err != nil {
+		return fmt.Errorf("error locating endpoint id %s: %v", container.NetworkSettings.EndpointID, err)
+	}
+
+	if err := ep.Leave(container.ID); err != nil {
+		return fmt.Errorf("endpoint leave failed: %v", err)
+
+	}
+
+	joinOptions, err := container.buildJoinOptions()
+	if err != nil {
+		return fmt.Errorf("Update network failed: %v", err)
+	}
+
+	if _, err := ep.Join(container.ID, joinOptions...); err != nil {
+		return fmt.Errorf("endpoint join failed: %v", err)
+	}
+
+	if err := container.updateJoinInfo(ep); err != nil {
+		return fmt.Errorf("Updating join info failed: %v", err)
+	}
+
+	return nil
+}
+
+// buildCreateEndpointOptions translates the container's exposed ports,
+// port bindings, publish-all flag and MAC address into libnetwork
+// endpoint-creation options. Legacy PortSpecs are migrated into the
+// host config first.
+func (container *Container) buildCreateEndpointOptions() ([]libnetwork.EndpointOption, error) {
+	var (
+		portSpecs     = make(nat.PortSet)
+		bindings      = make(nat.PortMap)
+		pbList        []types.PortBinding
+		exposeList    []types.TransportPort
+		createOptions []libnetwork.EndpointOption
+	)
+
+	// Migrate deprecated Config.PortSpecs into hostConfig port bindings
+	// and persist the result.
+	if container.Config.PortSpecs != nil {
+		if err := migratePortMappings(container.Config, container.hostConfig); err != nil {
+			return nil, err
+		}
+		container.Config.PortSpecs = nil
+		if err := container.WriteHostConfig(); err != nil {
+			return nil, err
+		}
+	}
+
+	if container.Config.ExposedPorts != nil {
+		portSpecs = container.Config.ExposedPorts
+	}
+
+	if container.hostConfig.PortBindings != nil {
+		for p, b := range container.hostConfig.PortBindings {
+			bindings[p] = []nat.PortBinding{}
+			for _, bb := range b {
+				bindings[p] = append(bindings[p], nat.PortBinding{
+					HostIp:   bb.HostIp,
+					HostPort: bb.HostPort,
+				})
+			}
+		}
+	}
+
+	container.NetworkSettings.PortMapping = nil
+
+	ports := make([]nat.Port, len(portSpecs))
+	var i int
+	for p := range portSpecs {
+		ports[i] = p
+		i++
+	}
+	nat.SortPortMap(ports, bindings)
+	for _, port := range ports {
+		expose := types.TransportPort{}
+		expose.Proto = types.ParseProtocol(port.Proto())
+		expose.Port = uint16(port.Int())
+		exposeList = append(exposeList, expose)
+
+		pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto}
+		binding := bindings[port]
+		for i := 0; i < len(binding); i++ {
+			pbCopy := pb.GetCopy()
+			pbCopy.HostPort = uint16(nat.Port(binding[i].HostPort).Int())
+			pbCopy.HostIP = net.ParseIP(binding[i].HostIp)
+			pbList = append(pbList, pbCopy)
+		}
+
+		// With --publish-all, exposed ports without an explicit binding
+		// get a binding with driver-chosen host port.
+		if container.hostConfig.PublishAllPorts && len(binding) == 0 {
+			pbList = append(pbList, pb)
+		}
+	}
+
+	createOptions = append(createOptions,
+		libnetwork.CreateOptionPortMapping(pbList),
+		libnetwork.CreateOptionExposedPorts(exposeList))
+
+	if container.Config.MacAddress != "" {
+		mac, err := net.ParseMAC(container.Config.MacAddress)
+		if err != nil {
+			return nil, err
+		}
+
+		genericOption := options.Generic{
+			netlabel.MacAddress: mac,
+		}
+
+		createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption))
+	}
+
+	return createOptions, nil
+}
+
+// AllocateNetwork creates and joins a libnetwork endpoint for the
+// container on the network named by its NetworkMode, then records the
+// resulting settings. No-op when networking is disabled or the
+// container shares another container's network namespace.
+func (container *Container) AllocateNetwork() error {
+	mode := container.hostConfig.NetworkMode
+	if container.Config.NetworkDisabled || mode.IsContainer() {
+		return nil
+	}
+
+	// NOTE(review): this declaration is redundant — the := below would
+	// introduce err in the same scope anyway.
+	var err error
+
+	n, err := container.daemon.netController.NetworkByName(string(mode))
+	if err != nil {
+		return fmt.Errorf("error locating network with name %s: %v", string(mode), err)
+	}
+
+	createOptions, err := container.buildCreateEndpointOptions()
+	if err != nil {
+		return err
+	}
+
+	ep, err := n.CreateEndpoint(container.Name, createOptions...)
+	if err != nil {
+		return err
+	}
+
+	if err := container.updateNetworkSettings(n, ep); err != nil {
+		return err
+	}
+
+	joinOptions, err := container.buildJoinOptions()
+	if err != nil {
+		return err
+	}
+
+	if _, err := ep.Join(container.ID, joinOptions...); err != nil {
+		return err
+	}
+
+	if err := container.updateJoinInfo(ep); err != nil {
+		return fmt.Errorf("Updating join info failed: %v", err)
+	}
+
+	if err := container.WriteHostConfig(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// initializeNetworking prepares the container's network configuration
+// before start: normalizes the network mode, handles "container:" and
+// host modes, allocates the network, and writes the hostname file.
+func (container *Container) initializeNetworking() error {
+	var err error
+
+	// Make sure NetworkMode has an acceptable value before
+	// initializing networking.
+	if container.hostConfig.NetworkMode == runconfig.NetworkMode("") {
+		container.hostConfig.NetworkMode = runconfig.NetworkMode("bridge")
+	}
+
+	if container.hostConfig.NetworkMode.IsContainer() {
+		// we need to get the hosts files from the container to join
+		nc, err := container.getNetworkedContainer()
+		if err != nil {
+			return err
+		}
+		container.HostnamePath = nc.HostnamePath
+		container.HostsPath = nc.HostsPath
+		container.ResolvConfPath = nc.ResolvConfPath
+		container.Config.Hostname = nc.Config.Hostname
+		container.Config.Domainname = nc.Config.Domainname
+		return nil
+	}
+
+	if container.daemon.config.DisableNetwork {
+		container.Config.NetworkDisabled = true
+	}
+
+	if container.hostConfig.NetworkMode.IsHost() {
+		// Host mode inherits the host's hostname, split into host and
+		// domain parts.
+		container.Config.Hostname, err = os.Hostname()
+		if err != nil {
+			return err
+		}
+
+		parts := strings.SplitN(container.Config.Hostname, ".", 2)
+		if len(parts) > 1 {
+			container.Config.Hostname = parts[0]
+			container.Config.Domainname = parts[1]
+		}
+
+	}
+
+	if err := container.AllocateNetwork(); err != nil {
+		return err
+	}
+
+	return container.buildHostnameFile()
+}
+
+// Make sure the config is compatible with the current kernel:
+// unsupported memory/swap limits are discarded with a warning rather
+// than failing the container.
+func (container *Container) verifyDaemonSettings() {
+	if container.hostConfig.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
+		logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.")
+		container.hostConfig.Memory = 0
+	}
+	if container.hostConfig.Memory > 0 && container.hostConfig.MemorySwap != -1 && !container.daemon.sysInfo.SwapLimit {
+		logrus.Warnf("Your kernel does not support swap limit capabilities. Limitation discarded.")
+		container.hostConfig.MemorySwap = -1
+	}
+	if container.daemon.sysInfo.IPv4ForwardingDisabled {
+		logrus.Warnf("IPv4 forwarding is disabled. Networking will not work")
+	}
+}
+
+// ExportRw returns an archive of the container's read-write layer. The
+// container stays mounted until the returned ReadCloser is closed.
+func (container *Container) ExportRw() (archive.Archive, error) {
+	// Reject unregistered containers BEFORE mounting: the original
+	// ordering mounted first and then returned on the nil-daemon path,
+	// leaving the rootfs mounted (and Mount itself needs daemon state).
+	if container.daemon == nil {
+		return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
+	}
+	if err := container.Mount(); err != nil {
+		return nil, err
+	}
+	archive, err := container.daemon.Diff(container)
+	if err != nil {
+		container.Unmount()
+		return nil, err
+	}
+	// Unmount only when the caller closes the returned stream.
+	return ioutils.NewReadCloserWrapper(archive, func() error {
+		err := archive.Close()
+		container.Unmount()
+		return err
+	}),
+		nil
+}
+
+// getIpcContainer resolves the container whose IPC namespace this
+// container joins (IpcMode "container:<id>"), requiring it to be running.
+func (container *Container) getIpcContainer() (*Container, error) {
+	containerID := container.hostConfig.IpcMode.Container()
+	c, err := container.daemon.Get(containerID)
+	if err != nil {
+		return nil, err
+	}
+	if !c.IsRunning() {
+		return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID)
+	}
+	return c, nil
+}
+
+// setupWorkingDirectory ensures the configured working directory exists
+// inside the container rootfs, creating it if missing, and fails if the
+// path exists but is not a directory.
+func (container *Container) setupWorkingDirectory() error {
+	if container.Config.WorkingDir != "" {
+		container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir)
+
+		pth, err := container.GetResourcePath(container.Config.WorkingDir)
+		if err != nil {
+			return err
+		}
+
+		pthInfo, err := os.Stat(pth)
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return err
+			}
+
+			if err := os.MkdirAll(pth, 0755); err != nil {
+				return err
+			}
+		}
+		if pthInfo != nil && !pthInfo.IsDir() {
+			return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir)
+		}
+	}
+	return nil
+}
+
+// getNetworkedContainer resolves the target of a "container:<id>"
+// network mode. It rejects self-joins and non-running targets.
+func (container *Container) getNetworkedContainer() (*Container, error) {
+	parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2)
+	switch parts[0] {
+	case "container":
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("no container specified to join network")
+		}
+		nc, err := container.daemon.Get(parts[1])
+		if err != nil {
+			return nil, err
+		}
+		if container == nc {
+			return nil, fmt.Errorf("cannot join own network")
+		}
+		if !nc.IsRunning() {
+			return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1])
+		}
+		return nc, nil
+	default:
+		return nil, fmt.Errorf("network mode not set to container")
+	}
+}
+
+// ReleaseNetwork leaves and deletes the container's libnetwork endpoint
+// and resets its network settings. Failures are logged, not returned,
+// so teardown is best-effort.
+func (container *Container) ReleaseNetwork() {
+	if container.hostConfig.NetworkMode.IsContainer() || container.daemon.config.DisableNetwork {
+		return
+	}
+
+	n, err := container.daemon.netController.NetworkByID(container.NetworkSettings.NetworkID)
+	if err != nil {
+		logrus.Errorf("error locating network id %s: %v", container.NetworkSettings.NetworkID, err)
+		return
+	}
+
+	ep, err := n.EndpointByID(container.NetworkSettings.EndpointID)
+	if err != nil {
+		logrus.Errorf("error locating endpoint id %s: %v", container.NetworkSettings.EndpointID, err)
+		return
+	}
+
+	if err := ep.Leave(container.ID); err != nil {
+		logrus.Errorf("leaving endpoint failed: %v", err)
+	}
+
+	if err := ep.Delete(); err != nil {
+		logrus.Errorf("deleting endpoint failed: %v", err)
+	}
+
+	container.NetworkSettings = &network.Settings{}
+}
+
+// disableAllActiveLinks tears down every currently-active link of the
+// container. Ranging over a nil map is a no-op in Go, so no explicit
+// nil check is required.
+func disableAllActiveLinks(container *Container) {
+	for _, link := range container.activeLinks {
+		link.Disable()
+	}
+}
+
+// DisableLink tears down the named active link, removes it from the
+// active set, and refreshes the endpoint so the link's /etc/hosts and
+// env plumbing is dropped. Unknown names are logged at debug level.
+func (container *Container) DisableLink(name string) {
+	if container.activeLinks != nil {
+		if link, exists := container.activeLinks[name]; exists {
+			link.Disable()
+			delete(container.activeLinks, name)
+			if err := container.UpdateNetwork(); err != nil {
+				logrus.Debugf("Could not update network to remove link: %v", err)
+			}
+		} else {
+			logrus.Debugf("Could not find active link for %s", name)
+		}
+	}
+}
+
+// UnmountVolumes unmounts every mount point of the container. When
+// forceSyscall is true a raw umount(2) is attempted first; its error is
+// deliberately ignored (the path may simply not be mounted).
+func (container *Container) UnmountVolumes(forceSyscall bool) error {
+	for _, m := range container.MountPoints {
+		dest, err := container.GetResourcePath(m.Destination)
+		if err != nil {
+			return err
+		}
+
+		if forceSyscall {
+			syscall.Unmount(dest, 0)
+		}
+
+		if m.Volume != nil {
+			if err := m.Volume.Unmount(); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/daemon/container_windows.go b/daemon/container_windows.go
new file mode 100644
index 0000000000..0807aabc93
--- /dev/null
+++ b/daemon/container_windows.go
@@ -0,0 +1,171 @@
+// +build windows
+
+package daemon
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/docker/docker/daemon/execdriver"
+ "github.com/docker/docker/pkg/archive"
+)
+
+// TODO Windows. A reasonable default at the moment.
+const DefaultPathEnv = `c:\windows\system32;c:\windows\system32\WindowsPowerShell\v1.0`
+
+// Container is the Windows-specific container type, embedding the
+// platform-independent CommonContainer state.
+type Container struct {
+	CommonContainer
+
+	// Fields below here are platform specific.
+
+	// TODO Windows. Further factoring out of unused fields will be necessary.
+
+	// ---- START OF TEMPORARY DECLARATION ----
+	// TODO Windows. Temporarily keeping fields in to assist in compilation
+	// of the daemon on Windows without affecting many other files in a single
+	// PR, thus making code review significantly harder. These lines will be
+	// removed in subsequent PRs.
+
+	AppArmorProfile string
+	// ---- END OF TEMPORARY DECLARATION ----
+
+}
+
+// The functions below are no-op Windows stubs for the platform-specific
+// container API; each returns zero values so the daemon can compile and
+// limp along on Windows while the real implementations land.
+func killProcessDirectly(container *Container) error {
+	return nil
+}
+
+func (container *Container) setupContainerDns() error {
+	return nil
+}
+
+func (container *Container) updateParentsHosts() error {
+	return nil
+}
+
+func (container *Container) setupLinkedContainers() ([]string, error) {
+	return nil, nil
+}
+
+func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
+	return nil
+}
+
+func (container *Container) initializeNetworking() error {
+	return nil
+}
+
+func (container *Container) setupWorkingDirectory() error {
+	return nil
+}
+
+func (container *Container) verifyDaemonSettings() {
+}
+
+// populateCommand fills c.command for the Windows exec driver. Only
+// "none" and "bridge" network modes are accepted for now; resource
+// controls and several namespace options are not yet implemented.
+func populateCommand(c *Container, env []string) error {
+	en := &execdriver.Network{
+		Mtu:       c.daemon.config.Mtu,
+		Interface: nil,
+	}
+
+	// TODO Windows. Appropriate network mode (will refactor as part of
+	// libnetwork. For now, even through bridge not used, let it succeed to
+	// allow the Windows daemon to limp during its bring-up
+	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
+	switch parts[0] {
+	case "none":
+	case "bridge", "": // empty string to support existing containers
+		if !c.Config.NetworkDisabled {
+			network := c.NetworkSettings
+			en.Interface = &execdriver.NetworkInterface{
+				Bridge:     network.Bridge,
+				MacAddress: network.MacAddress,
+			}
+		}
+	case "host", "container":
+		return fmt.Errorf("unsupported network mode: %s", c.hostConfig.NetworkMode)
+	default:
+		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
+	}
+
+	pid := &execdriver.Pid{}
+
+	// TODO Windows. This can probably be factored out.
+	pid.HostPid = c.hostConfig.PidMode.IsHost()
+
+	// TODO Windows. Resource controls to be implemented later.
+	resources := &execdriver.Resources{}
+
+	// TODO Windows. Further refactoring required (privileged/user)
+	processConfig := execdriver.ProcessConfig{
+		Privileged: c.hostConfig.Privileged,
+		Entrypoint: c.Path,
+		Arguments:  c.Args,
+		Tty:        c.Config.Tty,
+		User:       c.Config.User,
+	}
+
+	processConfig.Env = env
+
+	// TODO Windows: Factor out remainder of unused fields.
+	c.command = &execdriver.Command{
+		ID:             c.ID,
+		Rootfs:         c.RootfsPath(),
+		ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
+		InitPath:       "/.dockerinit",
+		WorkingDir:     c.Config.WorkingDir,
+		Network:        en,
+		Pid:            pid,
+		Resources:      resources,
+		CapAdd:         c.hostConfig.CapAdd,
+		CapDrop:        c.hostConfig.CapDrop,
+		ProcessConfig:  processConfig,
+		ProcessLabel:   c.GetProcessLabel(),
+		MountLabel:     c.GetMountLabel(),
+	}
+
+	return nil
+}
+
+// GetSize, return real size, virtual size
+// Windows stub: size computation is not yet implemented.
+func (container *Container) GetSize() (int64, int64) {
+	// TODO Windows
+	return 0, 0
+}
+
+// AllocateNetwork is a Windows stub; network allocation will be
+// reworked with libnetwork.
+func (container *Container) AllocateNetwork() error {
+
+	// TODO Windows. This needs reworking with libnetwork. In the
+	// proof-of-concept for //build conference, the Windows daemon
+	// invoked eng.Job("allocate_interface) passing through
+	// RequestedMac.
+
+	return nil
+}
+
+// ExportRw on Windows currently only rejects running containers; the
+// actual export (which differs from the Linux path) is not implemented.
+func (container *Container) ExportRw() (archive.Archive, error) {
+	if container.IsRunning() {
+		return nil, fmt.Errorf("Cannot export a running container.")
+	}
+	// TODO Windows. Implementation (different to Linux)
+	return nil, nil
+}
+
+// The remaining functions are no-op Windows stubs for networking,
+// linking and volume unmounting, pending the libnetwork rework.
+func (container *Container) ReleaseNetwork() {
+	// TODO Windows. Rework with libnetwork
+}
+
+func (container *Container) RestoreNetwork() error {
+	// TODO Windows. Rework with libnetwork
+	return nil
+}
+
+func disableAllActiveLinks(container *Container) {
+}
+
+func (container *Container) DisableLink(name string) {
+}
+
+func (container *Container) UnmountVolumes(forceSyscall bool) error {
+	return nil
+}
diff --git a/daemon/copy.go b/daemon/copy.go
new file mode 100644
index 0000000000..dec30d8f37
--- /dev/null
+++ b/daemon/copy.go
@@ -0,0 +1,16 @@
+package daemon
+
+import "io"
+
+func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
+ container, err := daemon.Get(name)
+ if err != nil {
+ return nil, err
+ }
+
+ if res[0] == '/' {
+ res = res[1:]
+ }
+
+ return container.Copy(res)
+}
diff --git a/daemon/create.go b/daemon/create.go
index db60355071..b3e50e56dc 100644
--- a/daemon/create.go
+++ b/daemon/create.go
@@ -2,10 +2,14 @@ package daemon
import (
"fmt"
+ "os"
+ "path/filepath"
+ "strings"
"github.com/docker/docker/graph"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/parsers"
+ "github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/runconfig"
"github.com/docker/libcontainer/label"
)
@@ -16,6 +20,13 @@ func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hos
return "", warnings, err
}
+ // The check for a valid workdir path is made on the server rather than in the
+ // client. This is because we don't know the type of path (Linux or Windows)
+ // to validate on the client.
+ if config.WorkingDir != "" && !filepath.IsAbs(config.WorkingDir) {
+ return "", warnings, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path.", config.WorkingDir)
+ }
+
container, buildWarnings, err := daemon.Create(config, hostConfig, name)
if err != nil {
if daemon.Graph().IsNotExist(err, config.Image) {
@@ -79,17 +90,51 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
if err := daemon.createRootfs(container); err != nil {
return nil, nil, err
}
- if hostConfig != nil {
- if err := daemon.setHostConfig(container, hostConfig); err != nil {
- return nil, nil, err
- }
+ if err := daemon.setHostConfig(container, hostConfig); err != nil {
+ return nil, nil, err
}
if err := container.Mount(); err != nil {
return nil, nil, err
}
defer container.Unmount()
- if err := container.prepareVolumes(); err != nil {
- return nil, nil, err
+
+ for spec := range config.Volumes {
+ var (
+ name, destination string
+ parts = strings.Split(spec, ":")
+ )
+ switch len(parts) {
+ case 2:
+ name, destination = parts[0], filepath.Clean(parts[1])
+ default:
+ name = stringid.GenerateRandomID()
+ destination = filepath.Clean(parts[0])
+ }
+ // Skip volumes for which we already have something mounted on that
+ // destination because of a --volume-from.
+ if container.isDestinationMounted(destination) {
+ continue
+ }
+ path, err := container.GetResourcePath(destination)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ stat, err := os.Stat(path)
+ if err == nil && !stat.IsDir() {
+ return nil, nil, fmt.Errorf("cannot mount volume over existing file, file exists %s", path)
+ }
+
+ v, err := createVolume(name, config.VolumeDriver)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := container.copyImagePathContent(v, destination); err != nil {
+ return nil, nil, err
+ }
+
+ container.addMountPointWithVolume(destination, v, true)
}
if err := container.ToDisk(); err != nil {
return nil, nil, err
@@ -106,9 +151,6 @@ func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode, pidMode run
if err != nil {
return nil, err
}
- if !c.IsRunning() {
- return nil, fmt.Errorf("cannot join IPC of a non running container: %s", ipcContainer)
- }
return label.DupSecOpt(c.ProcessLabel), nil
}
diff --git a/daemon/daemon.go b/daemon/daemon.go
index 05de402174..0246dd0a86 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -1,10 +1,10 @@
package daemon
import (
- "bytes"
"fmt"
"io"
"io/ioutil"
+ "net"
"os"
"path"
"path/filepath"
@@ -15,6 +15,9 @@ import (
"time"
"github.com/docker/libcontainer/label"
+ "github.com/docker/libnetwork"
+ "github.com/docker/libnetwork/netlabel"
+ "github.com/docker/libnetwork/options"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api"
@@ -22,12 +25,10 @@ import (
"github.com/docker/docker/daemon/events"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/daemon/execdriver/execdrivers"
- "github.com/docker/docker/daemon/execdriver/lxc"
"github.com/docker/docker/daemon/graphdriver"
_ "github.com/docker/docker/daemon/graphdriver/vfs"
+ "github.com/docker/docker/daemon/logger"
"github.com/docker/docker/daemon/network"
- "github.com/docker/docker/daemon/networkdriver/bridge"
- "github.com/docker/docker/engine"
"github.com/docker/docker/graph"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive"
@@ -38,8 +39,6 @@ import (
"github.com/docker/docker/pkg/namesgenerator"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/parsers/kernel"
- "github.com/docker/docker/pkg/pidfile"
- "github.com/docker/docker/pkg/resolvconf"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/truncindex"
@@ -47,11 +46,12 @@ import (
"github.com/docker/docker/runconfig"
"github.com/docker/docker/trust"
"github.com/docker/docker/utils"
- "github.com/docker/docker/volumes"
-
- "github.com/go-fsnotify/fsnotify"
+ volumedrivers "github.com/docker/docker/volume/drivers"
+ "github.com/docker/docker/volume/local"
)
+const defaultVolumesPathName = "volumes"
+
var (
validContainerNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
@@ -102,8 +102,6 @@ type Daemon struct {
repositories *graph.TagStore
idIndex *truncindex.TruncIndex
sysInfo *sysinfo.SysInfo
- volumes *volumes.Repository
- eng *engine.Engine
config *Config
containerGraph *graphdb.Database
driver graphdriver.Driver
@@ -112,14 +110,8 @@ type Daemon struct {
defaultLogConfig runconfig.LogConfig
RegistryService *registry.Service
EventsService *events.Events
-}
-
-// Install installs daemon capabilities to eng.
-func (daemon *Daemon) Install(eng *engine.Engine) error {
- // FIXME: this hack is necessary for legacy integration tests to access
- // the daemon object.
- eng.HackSetGlobalVar("httpapi.daemon", daemon)
- return nil
+ netController libnetwork.NetworkController
+ root string
}
// Get looks for a container using the provided information, which could be
@@ -166,10 +158,14 @@ func (daemon *Daemon) containerRoot(id string) string {
// This is typically done at startup.
func (daemon *Daemon) load(id string) (*Container, error) {
container := &Container{
- root: daemon.containerRoot(id),
- State: NewState(),
- execCommands: newExecStore(),
+ CommonContainer: CommonContainer{
+ State: NewState(),
+ root: daemon.containerRoot(id),
+ MountPoints: make(map[string]*mountPoint),
+ execCommands: newExecStore(),
+ },
}
+
if err := container.FromDisk(); err != nil {
return nil, err
}
@@ -217,27 +213,24 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
// we'll waste time if we update it for every container
daemon.idIndex.Add(container.ID)
- container.registerVolumes()
+ if err := daemon.verifyOldVolumesInfo(container); err != nil {
+ return err
+ }
+
+ if err := container.prepareMountPoints(); err != nil {
+ return err
+ }
- // FIXME: if the container is supposed to be running but is not, auto restart it?
- // if so, then we need to restart monitor and init a new lock
- // If the container is supposed to be running, make sure of it
if container.IsRunning() {
logrus.Debugf("killing old running container %s", container.ID)
container.SetStopped(&execdriver.ExitStatus{ExitCode: 0})
- // We only have to handle this for lxc because the other drivers will ensure that
- // no processes are left when docker dies
- if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
- lxc.KillLxc(container.ID, 9)
- } else {
- // use the current driver and ensure that the container is dead x.x
- cmd := &execdriver.Command{
- ID: container.ID,
- }
- daemon.execDriver.Terminate(cmd)
+ // use the current driver and ensure that the container is dead x.x
+ cmd := &execdriver.Command{
+ ID: container.ID,
}
+ daemon.execDriver.Terminate(cmd)
if err := container.Unmount(); err != nil {
logrus.Debugf("unmount error %s", err)
@@ -266,10 +259,15 @@ func (daemon *Daemon) ensureName(container *Container) error {
}
func (daemon *Daemon) restore() error {
+ type cr struct {
+ container *Container
+ registered bool
+ }
+
var (
debug = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
- containers = make(map[string]*Container)
currentDriver = daemon.driver.String()
+ containers = make(map[string]*cr)
)
if !debug {
@@ -295,14 +293,12 @@ func (daemon *Daemon) restore() error {
if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
logrus.Debugf("Loaded container %v", container.ID)
- containers[container.ID] = container
+ containers[container.ID] = &cr{container: container}
} else {
logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
}
}
- registeredContainers := []*Container{}
-
if entities := daemon.containerGraph.List("/", -1); entities != nil {
for _, p := range entities.Paths() {
if !debug && logrus.GetLevel() == logrus.InfoLevel {
@@ -311,50 +307,43 @@ func (daemon *Daemon) restore() error {
e := entities[p]
- if container, ok := containers[e.ID()]; ok {
- if err := daemon.register(container, false); err != nil {
- logrus.Debugf("Failed to register container %s: %s", container.ID, err)
- }
-
- registeredContainers = append(registeredContainers, container)
-
- // delete from the map so that a new name is not automatically generated
- delete(containers, e.ID())
+ if c, ok := containers[e.ID()]; ok {
+ c.registered = true
}
}
}
- // Any containers that are left over do not exist in the graph
- for _, container := range containers {
- // Try to set the default name for a container if it exists prior to links
- container.Name, err = daemon.generateNewName(container.ID)
- if err != nil {
- logrus.Debugf("Setting default id - %s", err)
- }
+ group := sync.WaitGroup{}
+ for _, c := range containers {
+ group.Add(1)
- if err := daemon.register(container, false); err != nil {
- logrus.Debugf("Failed to register container %s: %s", container.ID, err)
- }
+ go func(container *Container, registered bool) {
+ defer group.Done()
- registeredContainers = append(registeredContainers, container)
- }
+ if !registered {
+ // Try to set the default name for a container if it exists prior to links
+ container.Name, err = daemon.generateNewName(container.ID)
+ if err != nil {
+ logrus.Debugf("Setting default id - %s", err)
+ }
+ }
- // check the restart policy on the containers and restart any container with
- // the restart policy of "always"
- if daemon.config.AutoRestart {
- logrus.Debug("Restarting containers...")
+ if err := daemon.register(container, false); err != nil {
+ logrus.Debugf("Failed to register container %s: %s", container.ID, err)
+ }
- for _, container := range registeredContainers {
- if container.hostConfig.RestartPolicy.Name == "always" ||
- (container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) {
+ // check the restart policy on the containers and restart any container with
+ // the restart policy of "always"
+ if daemon.config.AutoRestart && container.shouldRestart() {
logrus.Debugf("Starting container %s", container.ID)
if err := container.Start(); err != nil {
logrus.Debugf("Failed to start container %s: %s", container.ID, err)
}
}
- }
+ }(c.container, c.registered)
}
+ group.Wait()
if !debug {
if logrus.GetLevel() == logrus.InfoLevel {
@@ -366,61 +355,6 @@ func (daemon *Daemon) restore() error {
return nil
}
-// set up the watch on the host's /etc/resolv.conf so that we can update container's
-// live resolv.conf when the network changes on the host
-func (daemon *Daemon) setupResolvconfWatcher() error {
-
- watcher, err := fsnotify.NewWatcher()
- if err != nil {
- return err
- }
-
- //this goroutine listens for the events on the watch we add
- //on the resolv.conf file on the host
- go func() {
- for {
- select {
- case event := <-watcher.Events:
- if event.Name == "/etc/resolv.conf" &&
- (event.Op&(fsnotify.Write|fsnotify.Create) != 0) {
- // verify a real change happened before we go further--a file write may have happened
- // without an actual change to the file
- updatedResolvConf, newResolvConfHash, err := resolvconf.GetIfChanged()
- if err != nil {
- logrus.Debugf("Error retrieving updated host resolv.conf: %v", err)
- } else if updatedResolvConf != nil {
- // because the new host resolv.conf might have localhost nameservers..
- updatedResolvConf, modified := resolvconf.FilterResolvDns(updatedResolvConf, daemon.config.Bridge.EnableIPv6)
- if modified {
- // changes have occurred during localhost cleanup: generate an updated hash
- newHash, err := ioutils.HashData(bytes.NewReader(updatedResolvConf))
- if err != nil {
- logrus.Debugf("Error generating hash of new resolv.conf: %v", err)
- } else {
- newResolvConfHash = newHash
- }
- }
- logrus.Debug("host network resolv.conf changed--walking container list for updates")
- contList := daemon.containers.List()
- for _, container := range contList {
- if err := container.updateResolvConf(updatedResolvConf, newResolvConfHash); err != nil {
- logrus.Debugf("Error on resolv.conf update check for container ID: %s: %v", container.ID, err)
- }
- }
- }
- }
- case err := <-watcher.Errors:
- logrus.Debugf("host resolv.conf notify error: %v", err)
- }
- }
- }()
-
- if err := watcher.Add("/etc"); err != nil {
- return err
- }
- return nil
-}
-
func (daemon *Daemon) checkDeprecatedExpose(config *runconfig.Config) bool {
if config != nil {
if config.PortSpecs != nil {
@@ -593,22 +527,25 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID
entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd)
container := &Container{
- // FIXME: we should generate the ID here instead of receiving it as an argument
- ID: id,
- Created: time.Now().UTC(),
- Path: entrypoint,
- Args: args, //FIXME: de-duplicate from config
- Config: config,
- hostConfig: &runconfig.HostConfig{},
- ImageID: imgID,
- NetworkSettings: &network.Settings{},
- Name: name,
- Driver: daemon.driver.String(),
- ExecDriver: daemon.execDriver.Name(),
- State: NewState(),
- execCommands: newExecStore(),
+ CommonContainer: CommonContainer{
+ ID: id, // FIXME: we should generate the ID here instead of receiving it as an argument
+ Created: time.Now().UTC(),
+ Path: entrypoint,
+ Args: args, //FIXME: de-duplicate from config
+ Config: config,
+ hostConfig: &runconfig.HostConfig{},
+ ImageID: imgID,
+ NetworkSettings: &network.Settings{},
+ Name: name,
+ Driver: daemon.driver.String(),
+ ExecDriver: daemon.execDriver.Name(),
+ State: NewState(),
+ execCommands: newExecStore(),
+ MountPoints: map[string]*mountPoint{},
+ },
}
container.root = daemon.containerRoot(container.ID)
+
return container, err
}
@@ -707,14 +644,14 @@ func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error
func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error {
if hostConfig != nil && hostConfig.Links != nil {
for _, l := range hostConfig.Links {
- parts, err := parsers.PartParser("name:alias", l)
+ name, alias, err := parsers.ParseLink(l)
if err != nil {
return err
}
- child, err := daemon.Get(parts["name"])
+ child, err := daemon.Get(name)
if err != nil {
//An error from daemon.Get() means this name could not be found
- return fmt.Errorf("Could not get container for %s", parts["name"])
+ return fmt.Errorf("Could not get container for %s", name)
}
for child.hostConfig.NetworkMode.IsContainer() {
parts := strings.SplitN(string(child.hostConfig.NetworkMode), ":", 2)
@@ -726,7 +663,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
if child.hostConfig.NetworkMode.IsHost() {
return runconfig.ErrConflictHostNetworkAndLinks
}
- if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil {
+ if err := daemon.RegisterLink(container, child, alias); err != nil {
return err
}
}
@@ -741,44 +678,19 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
return nil
}
-// FIXME: harmonize with NewGraph()
-func NewDaemon(config *Config, eng *engine.Engine, registryService *registry.Service) (*Daemon, error) {
- daemon, err := NewDaemonFromDirectory(config, eng, registryService)
- if err != nil {
- return nil, err
- }
- return daemon, nil
-}
-
-func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService *registry.Service) (*Daemon, error) {
- if config.Mtu == 0 {
- config.Mtu = getDefaultNetworkMtu()
- }
+func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
// Check for mutually incompatible config options
if config.Bridge.Iface != "" && config.Bridge.IP != "" {
return nil, fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one.")
}
- if !config.Bridge.EnableIptables && !config.Bridge.InterContainerCommunication {
+ if !config.Bridge.EnableIPTables && !config.Bridge.InterContainerCommunication {
return nil, fmt.Errorf("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
}
- if !config.Bridge.EnableIptables && config.Bridge.EnableIpMasq {
- config.Bridge.EnableIpMasq = false
+ if !config.Bridge.EnableIPTables && config.Bridge.EnableIPMasq {
+ config.Bridge.EnableIPMasq = false
}
config.DisableNetwork = config.Bridge.Iface == disableNetworkBridge
- // Claim the pidfile first, to avoid any and all unexpected race conditions.
- // Some of the init doesn't need a pidfile lock - but let's not try to be smart.
- if config.Pidfile != "" {
- file, err := pidfile.New(config.Pidfile)
- if err != nil {
- return nil, err
- }
- eng.OnShutdown(func() {
- // Always release the pidfile last, just in case
- file.Remove()
- })
- }
-
// Check that the system is supported and we have sufficient privileges
if runtime.GOOS != "linux" {
return nil, fmt.Errorf("The Docker daemon is only supported on linux")
@@ -790,6 +702,9 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService
return nil, err
}
+ // set up SIGUSR1 handler to dump Go routine stacks
+ setupSigusr1Trap()
+
// set up the tmpDir to use a canonical path
tmp, err := tempDir(config.Root)
if err != nil {
@@ -826,17 +741,30 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService
return nil, fmt.Errorf("error initializing graphdriver: %v", err)
}
logrus.Debugf("Using graph driver %s", driver)
- // register cleanup for graph driver
- eng.OnShutdown(func() {
- if err := driver.Cleanup(); err != nil {
- logrus.Errorf("Error during graph storage driver.Cleanup(): %v", err)
+
+ d := &Daemon{}
+ d.driver = driver
+
+ defer func() {
+ if err != nil {
+ if err := d.Shutdown(); err != nil {
+ logrus.Error(err)
+ }
}
- })
+ }()
+
+ // Verify logging driver type
+ if config.LogConfig.Type != "none" {
+ if _, err := logger.GetLogDriver(config.LogConfig.Type); err != nil {
+ return nil, fmt.Errorf("error finding the logging driver: %v", err)
+ }
+ }
+ logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)
if config.EnableSelinuxSupport {
if selinuxEnabled() {
// As Docker on btrfs and SELinux are incompatible at present, error on both being enabled
- if driver.String() == "btrfs" {
+ if d.driver.String() == "btrfs" {
return nil, fmt.Errorf("SELinux is not supported with the BTRFS graph driver")
}
logrus.Debug("SELinux enabled successfully")
@@ -854,25 +782,21 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService
}
// Migrate the container if it is aufs and aufs is enabled
- if err = migrateIfAufs(driver, config.Root); err != nil {
+ if err := migrateIfAufs(d.driver, config.Root); err != nil {
return nil, err
}
logrus.Debug("Creating images graph")
- g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver)
+ g, err := graph.NewGraph(path.Join(config.Root, "graph"), d.driver)
if err != nil {
return nil, err
}
- volumesDriver, err := graphdriver.GetDriver("vfs", config.Root, config.GraphOptions)
- if err != nil {
- return nil, err
- }
-
- volumes, err := volumes.NewRepository(filepath.Join(config.Root, "volumes"), volumesDriver)
+ volumesDriver, err := local.New(filepath.Join(config.Root, defaultVolumesPathName))
if err != nil {
return nil, err
}
+ volumedrivers.Register(volumesDriver, volumesDriver.Name())
trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
if err != nil {
@@ -897,14 +821,15 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService
Events: eventsService,
Trust: trustService,
}
- repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), tagCfg)
+ repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+d.driver.String()), tagCfg)
if err != nil {
return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
}
if !config.DisableNetwork {
- if err := bridge.InitDriver(&config.Bridge); err != nil {
- return nil, fmt.Errorf("Error initializing Bridge: %v", err)
+ d.netController, err = initNetworkController(config)
+ if err != nil {
+ return nil, fmt.Errorf("Error initializing network controller: %v", err)
}
}
@@ -913,12 +838,8 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService
if err != nil {
return nil, err
}
- // register graph close on shutdown
- eng.OnShutdown(func() {
- if err := graph.Close(); err != nil {
- logrus.Errorf("Error during container graph.Close(): %v", err)
- }
- })
+
+ d.containerGraph = graph
localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION))
sysInitPath := utils.DockerInitPath(localCopy)
@@ -941,72 +862,157 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService
}
sysInfo := sysinfo.New(false)
- const runDir = "/var/run/docker"
- ed, err := execdrivers.NewDriver(config.ExecDriver, config.ExecOptions, runDir, config.Root, sysInitPath, sysInfo)
+ ed, err := execdrivers.NewDriver(config.ExecDriver, config.ExecOptions, config.ExecRoot, config.Root, sysInitPath, sysInfo)
if err != nil {
return nil, err
}
- daemon := &Daemon{
- ID: trustKey.PublicKey().KeyID(),
- repository: daemonRepo,
- containers: &contStore{s: make(map[string]*Container)},
- execCommands: newExecStore(),
- graph: g,
- repositories: repositories,
- idIndex: truncindex.NewTruncIndex([]string{}),
- sysInfo: sysInfo,
- volumes: volumes,
- config: config,
- containerGraph: graph,
- driver: driver,
- sysInitPath: sysInitPath,
- execDriver: ed,
- eng: eng,
- statsCollector: newStatsCollector(1 * time.Second),
- defaultLogConfig: config.LogConfig,
- RegistryService: registryService,
- EventsService: eventsService,
- }
+ d.ID = trustKey.PublicKey().KeyID()
+ d.repository = daemonRepo
+ d.containers = &contStore{s: make(map[string]*Container)}
+ d.execCommands = newExecStore()
+ d.graph = g
+ d.repositories = repositories
+ d.idIndex = truncindex.NewTruncIndex([]string{})
+ d.sysInfo = sysInfo
+ d.config = config
+ d.sysInitPath = sysInitPath
+ d.execDriver = ed
+ d.statsCollector = newStatsCollector(1 * time.Second)
+ d.defaultLogConfig = config.LogConfig
+ d.RegistryService = registryService
+ d.EventsService = eventsService
+ d.root = config.Root
- eng.OnShutdown(func() {
- if err := daemon.shutdown(); err != nil {
- logrus.Errorf("Error during daemon.shutdown(): %v", err)
- }
- })
-
- if err := daemon.restore(); err != nil {
+ if err := d.restore(); err != nil {
return nil, err
}
- // set up filesystem watch on resolv.conf for network changes
- if err := daemon.setupResolvconfWatcher(); err != nil {
- return nil, err
- }
-
- return daemon, nil
+ return d, nil
}
-func (daemon *Daemon) shutdown() error {
- group := sync.WaitGroup{}
- logrus.Debug("starting clean shutdown of all containers...")
- for _, container := range daemon.List() {
- c := container
- if c.IsRunning() {
- logrus.Debugf("stopping %s", c.ID)
- group.Add(1)
+func initNetworkController(config *Config) (libnetwork.NetworkController, error) {
+ controller, err := libnetwork.New()
+ if err != nil {
+ return nil, fmt.Errorf("error obtaining controller instance: %v", err)
+ }
- go func() {
- defer group.Done()
- if err := c.KillSig(15); err != nil {
- logrus.Debugf("kill 15 error for %s - %s", c.ID, err)
- }
- c.WaitStop(-1 * time.Second)
- logrus.Debugf("container stopped %s", c.ID)
- }()
+ // Initialize default driver "null"
+
+ if err := controller.ConfigureNetworkDriver("null", options.Generic{}); err != nil {
+ return nil, fmt.Errorf("Error initializing null driver: %v", err)
+ }
+
+ // Initialize default network on "null"
+ if _, err := controller.NewNetwork("null", "none"); err != nil {
+ return nil, fmt.Errorf("Error creating default \"null\" network: %v", err)
+ }
+
+ // Initialize default driver "host"
+ if err := controller.ConfigureNetworkDriver("host", options.Generic{}); err != nil {
+ return nil, fmt.Errorf("Error initializing host driver: %v", err)
+ }
+
+ // Initialize default network on "host"
+ if _, err := controller.NewNetwork("host", "host"); err != nil {
+ return nil, fmt.Errorf("Error creating default \"host\" network: %v", err)
+ }
+
+ // Initialize default driver "bridge"
+ option := options.Generic{
+ "EnableIPForwarding": config.Bridge.EnableIPForward}
+
+ if err := controller.ConfigureNetworkDriver("bridge", options.Generic{netlabel.GenericData: option}); err != nil {
+ return nil, fmt.Errorf("Error initializing bridge driver: %v", err)
+ }
+
+ netOption := options.Generic{
+ "BridgeName": config.Bridge.Iface,
+ "Mtu": config.Mtu,
+ "EnableIPTables": config.Bridge.EnableIPTables,
+ "EnableIPMasquerade": config.Bridge.EnableIPMasq,
+ "EnableICC": config.Bridge.InterContainerCommunication,
+ "EnableUserlandProxy": config.Bridge.EnableUserlandProxy,
+ }
+
+ if config.Bridge.IP != "" {
+ ip, bipNet, err := net.ParseCIDR(config.Bridge.IP)
+ if err != nil {
+ return nil, err
+ }
+
+ bipNet.IP = ip
+ netOption["AddressIPv4"] = bipNet
+ }
+
+ if config.Bridge.FixedCIDR != "" {
+ _, fCIDR, err := net.ParseCIDR(config.Bridge.FixedCIDR)
+ if err != nil {
+ return nil, err
+ }
+
+ netOption["FixedCIDR"] = fCIDR
+ }
+
+ if config.Bridge.FixedCIDRv6 != "" {
+ _, fCIDRv6, err := net.ParseCIDR(config.Bridge.FixedCIDRv6)
+ if err != nil {
+ return nil, err
+ }
+
+ netOption["FixedCIDRv6"] = fCIDRv6
+ }
+
+ // --ip processing
+ if config.Bridge.DefaultIP != nil {
+ netOption["DefaultBindingIP"] = config.Bridge.DefaultIP
+ }
+
+ // Initialize default network on "bridge" with the same name
+ _, err = controller.NewNetwork("bridge", "bridge",
+ libnetwork.NetworkOptionGeneric(options.Generic{
+ netlabel.GenericData: netOption,
+ netlabel.EnableIPv6: config.Bridge.EnableIPv6,
+ }))
+ if err != nil {
+ return nil, fmt.Errorf("Error creating default \"bridge\" network: %v", err)
+ }
+
+ return controller, nil
+}
+
+func (daemon *Daemon) Shutdown() error {
+ if daemon.containerGraph != nil {
+ if err := daemon.containerGraph.Close(); err != nil {
+ logrus.Errorf("Error during container graph.Close(): %v", err)
}
}
- group.Wait()
+ if daemon.driver != nil {
+ if err := daemon.driver.Cleanup(); err != nil {
+ logrus.Errorf("Error during graph storage driver.Cleanup(): %v", err)
+ }
+ }
+ if daemon.containers != nil {
+ group := sync.WaitGroup{}
+ logrus.Debug("starting clean shutdown of all containers...")
+ for _, container := range daemon.List() {
+ c := container
+ if c.IsRunning() {
+ logrus.Debugf("stopping %s", c.ID)
+ group.Add(1)
+
+ go func() {
+ defer group.Done()
+ if err := c.KillSig(15); err != nil {
+ logrus.Debugf("kill 15 error for %s - %s", c.ID, err)
+ }
+ c.WaitStop(-1 * time.Second)
+ logrus.Debugf("container stopped %s", c.ID)
+ }()
+ }
+ }
+ group.Wait()
+ }
return nil
}
@@ -1045,22 +1051,6 @@ func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback e
return daemon.execDriver.Run(c.command, pipes, startCallback)
}
-func (daemon *Daemon) Pause(c *Container) error {
- if err := daemon.execDriver.Pause(c.command); err != nil {
- return err
- }
- c.SetPaused()
- return nil
-}
-
-func (daemon *Daemon) Unpause(c *Container) error {
- if err := daemon.execDriver.Unpause(c.command); err != nil {
- return err
- }
- c.SetUnpaused()
- return nil
-}
-
func (daemon *Daemon) Kill(c *Container, sig int) error {
return daemon.execDriver.Kill(c.command, sig)
}
@@ -1087,26 +1077,6 @@ func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface
return nil
}
-// Nuke kills all containers then removes all content
-// from the content root, including images, volumes and
-// container filesystems.
-// Again: this will remove your entire docker daemon!
-// FIXME: this is deprecated, and only used in legacy
-// tests. Please remove.
-func (daemon *Daemon) Nuke() error {
- var wg sync.WaitGroup
- for _, container := range daemon.List() {
- wg.Add(1)
- go func(c *Container) {
- c.Kill()
- wg.Done()
- }(container)
- }
- wg.Wait()
-
- return os.RemoveAll(daemon.config.Root)
-}
-
// FIXME: this is a convenience function for integration tests
// which need direct access to daemon.graph.
// Once the tests switch to using engine and jobs, this method
@@ -1190,14 +1160,14 @@ func checkKernel() error {
// test for specific functionalities.
// Unfortunately we can't test for the feature "does not cause a kernel panic"
// without actually causing a kernel panic, so we need this workaround until
- // the circumstances of pre-3.8 crashes are clearer.
+ // the circumstances of pre-3.10 crashes are clearer.
// For details see https://github.com/docker/docker/issues/407
if k, err := kernel.GetKernelVersion(); err != nil {
logrus.Warnf("%s", err)
} else {
- if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
+ if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 10, Minor: 0}) < 0 {
if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
- logrus.Warnf("You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
+ logrus.Warnf("You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.10.0.", k.String())
}
}
}
@@ -1231,15 +1201,30 @@ func (daemon *Daemon) verifyHostConfig(hostConfig *runconfig.HostConfig) ([]stri
if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 {
return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.")
}
+ if hostConfig.CpuPeriod > 0 && !daemon.SystemConfig().CpuCfsPeriod {
+ warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.")
+ hostConfig.CpuPeriod = 0
+ }
if hostConfig.CpuQuota > 0 && !daemon.SystemConfig().CpuCfsQuota {
warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.")
hostConfig.CpuQuota = 0
}
+ if hostConfig.BlkioWeight > 0 && (hostConfig.BlkioWeight < 10 || hostConfig.BlkioWeight > 1000) {
+ return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.")
+ }
+ if hostConfig.OomKillDisable && !daemon.SystemConfig().OomKillDisable {
+ hostConfig.OomKillDisable = false
+ return warnings, fmt.Errorf("Your kernel does not support oom kill disable.")
+ }
return warnings, nil
}
func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
+ if err := daemon.registerMountPoints(container, hostConfig); err != nil {
+ return err
+ }
+
container.Lock()
defer container.Unlock()
if err := parseSecurityOpt(container, hostConfig); err != nil {
@@ -1253,6 +1238,5 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.
container.hostConfig = hostConfig
container.toDisk()
-
return nil
}
diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go
index 43030b6f9b..d4c4be3d86 100644
--- a/daemon/daemon_test.go
+++ b/daemon/daemon_test.go
@@ -1,11 +1,17 @@
package daemon
import (
- "github.com/docker/docker/pkg/graphdb"
- "github.com/docker/docker/pkg/truncindex"
+ "fmt"
+ "io/ioutil"
"os"
"path"
+ "path/filepath"
"testing"
+
+ "github.com/docker/docker/pkg/graphdb"
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/docker/docker/pkg/truncindex"
+ "github.com/docker/docker/volume"
)
//
@@ -14,24 +20,38 @@ import (
func TestGet(t *testing.T) {
c1 := &Container{
- ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
- Name: "tender_bardeen",
+ CommonContainer: CommonContainer{
+ ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
+ Name: "tender_bardeen",
+ },
}
+
c2 := &Container{
- ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de",
- Name: "drunk_hawking",
+ CommonContainer: CommonContainer{
+ ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de",
+ Name: "drunk_hawking",
+ },
}
+
c3 := &Container{
- ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57",
- Name: "3cdbd1aa",
+ CommonContainer: CommonContainer{
+ ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57",
+ Name: "3cdbd1aa",
+ },
}
+
c4 := &Container{
- ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5",
- Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
+ CommonContainer: CommonContainer{
+ ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5",
+ Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
+ },
}
+
c5 := &Container{
- ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b",
- Name: "d22d69a2b896",
+ CommonContainer: CommonContainer{
+ ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b",
+ Name: "d22d69a2b896",
+ },
}
store := &contStore{
@@ -99,3 +119,89 @@ func TestGet(t *testing.T) {
os.Remove(daemonTestDbPath)
}
+
+func TestLoadWithVolume(t *testing.T) {
+ tmp, err := ioutil.TempDir("", "docker-daemon-test-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+
+ containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
+ containerPath := filepath.Join(tmp, containerId)
+ if err = os.MkdirAll(containerPath, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ hostVolumeId := stringid.GenerateRandomID()
+ volumePath := filepath.Join(tmp, "vfs", "dir", hostVolumeId)
+
+ config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0,
+"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"},
+"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top",
+"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"",
+"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true,
+"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null,
+"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95",
+"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1",
+"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","PortMapping":null,"Ports":{}},
+"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf",
+"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname",
+"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts",
+"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log",
+"Name":"/ubuntu","Driver":"aufs","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0,
+"UpdateDns":false,"Volumes":{"/vol1":"%s"},"VolumesRW":{"/vol1":true},"AppliedVolumesFrom":null}`
+
+ cfg := fmt.Sprintf(config, volumePath)
+ if err = ioutil.WriteFile(filepath.Join(containerPath, "config.json"), []byte(cfg), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ hostConfig := `{"Binds":[],"ContainerIDFile":"","LxcConf":[],"Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"",
+"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null,
+"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0},
+"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}`
+ if err = ioutil.WriteFile(filepath.Join(containerPath, "hostconfig.json"), []byte(hostConfig), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = os.MkdirAll(volumePath, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ daemon := &Daemon{
+ repository: tmp,
+ root: tmp,
+ }
+
+ c, err := daemon.load(containerId)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = daemon.verifyOldVolumesInfo(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(c.MountPoints) != 1 {
+ t.Fatalf("Expected 1 volume mounted, was 0\n")
+ }
+
+ m := c.MountPoints["/vol1"]
+ if m.Name != hostVolumeId {
+ t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeId, m.Name)
+ }
+
+ if m.Destination != "/vol1" {
+ t.Fatalf("Expected mount destination /vol1, was %s\n", m.Destination)
+ }
+
+ if !m.RW {
+ t.Fatalf("Expected mount point to be RW but it was not\n")
+ }
+
+ if m.Driver != volume.DefaultDriverName {
+ t.Fatalf("Expected mount driver local, was %s\n", m.Driver)
+ }
+}
diff --git a/daemon/daemon_zfs.go b/daemon/daemon_zfs.go
new file mode 100644
index 0000000000..2fc1d8707e
--- /dev/null
+++ b/daemon/daemon_zfs.go
@@ -0,0 +1,7 @@
+// +build !exclude_graphdriver_zfs,linux
+
+package daemon
+
+import (
+ _ "github.com/docker/docker/daemon/graphdriver/zfs"
+)
diff --git a/daemon/debugtrap.go b/daemon/debugtrap.go
new file mode 100644
index 0000000000..949bf3d6fe
--- /dev/null
+++ b/daemon/debugtrap.go
@@ -0,0 +1,21 @@
+// +build !windows
+
+package daemon
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+
+ psignal "github.com/docker/docker/pkg/signal"
+)
+
+func setupSigusr1Trap() {
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, syscall.SIGUSR1)
+ go func() {
+ for range c {
+ psignal.DumpStacks()
+ }
+ }()
+}
diff --git a/daemon/debugtrap_unsupported.go b/daemon/debugtrap_unsupported.go
new file mode 100644
index 0000000000..1600e8b290
--- /dev/null
+++ b/daemon/debugtrap_unsupported.go
@@ -0,0 +1,7 @@
+// +build !linux,!darwin,!freebsd
+
+package daemon
+
+func setupSigusr1Trap() {
+ return
+}
diff --git a/daemon/delete.go b/daemon/delete.go
index 464193b283..830bbbe267 100644
--- a/daemon/delete.go
+++ b/daemon/delete.go
@@ -22,8 +22,6 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
name, err := GetFullContainerName(name)
if err != nil {
return err
- // TODO: why was just job.Error(err) without return if the function cannot continue w/o container name?
- //job.Error(err)
}
parent, n := path.Split(name)
if parent == "/" {
@@ -35,68 +33,42 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
}
parentContainer, _ := daemon.Get(pe.ID())
+ if err := daemon.ContainerGraph().Delete(name); err != nil {
+ return err
+ }
+
if parentContainer != nil {
parentContainer.DisableLink(n)
}
- if err := daemon.ContainerGraph().Delete(name); err != nil {
- return err
- }
return nil
}
- if container != nil {
- // stop collection of stats for the container regardless
- // if stats are currently getting collected.
- daemon.statsCollector.stopCollection(container)
- if container.IsRunning() {
- if config.ForceRemove {
- if err := container.Kill(); err != nil {
- return fmt.Errorf("Could not kill running container, cannot remove - %v", err)
- }
- } else {
- return fmt.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
- }
- }
+ if err := daemon.rm(container, config.ForceRemove); err != nil {
+ return fmt.Errorf("Cannot destroy container %s: %v", name, err)
+ }
- if config.ForceRemove {
- if err := daemon.ForceRm(container); err != nil {
- logrus.Errorf("Cannot destroy container %s: %v", name, err)
- }
- } else {
- if err := daemon.Rm(container); err != nil {
- return fmt.Errorf("Cannot destroy container %s: %v", name, err)
- }
- }
- container.LogEvent("destroy")
- if config.RemoveVolume {
- daemon.DeleteVolumes(container.VolumePaths())
- }
+ container.LogEvent("destroy")
+
+ if config.RemoveVolume {
+ container.removeMountPoints()
}
return nil
}
-func (daemon *Daemon) DeleteVolumes(volumeIDs map[string]struct{}) {
- for id := range volumeIDs {
- if err := daemon.volumes.Delete(id); err != nil {
- logrus.Infof("%s", err)
- continue
- }
- }
-}
-
-func (daemon *Daemon) Rm(container *Container) (err error) {
- return daemon.commonRm(container, false)
-}
-
-func (daemon *Daemon) ForceRm(container *Container) (err error) {
- return daemon.commonRm(container, true)
-}
-
// Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
-func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err error) {
- if container == nil {
- return fmt.Errorf("The given container is ")
+func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
+ // stop collection of stats for the container regardless
+ // if stats are currently getting collected.
+ daemon.statsCollector.stopCollection(container)
+
+ if container.IsRunning() {
+ if !forceRemove {
+ return fmt.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
+ }
+ if err := container.Kill(); err != nil {
+ return fmt.Errorf("Could not kill running container, cannot remove - %v", err)
+ }
}
element := daemon.containers.Get(container.ID)
@@ -133,7 +105,6 @@ func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err erro
}
}()
- container.derefVolumes()
if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
logrus.Debugf("Unable to remove container from link graph: %s", err)
}
@@ -161,3 +132,7 @@ func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err erro
return nil
}
+
+func (daemon *Daemon) DeleteVolumes(c *Container) error {
+ return c.removeMountPoints()
+}
diff --git a/daemon/exec.go b/daemon/exec.go
index 9aa102690f..043afe0452 100644
--- a/daemon/exec.go
+++ b/daemon/exec.go
@@ -9,10 +9,8 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/execdriver"
- "github.com/docker/docker/daemon/execdriver/lxc"
"github.com/docker/docker/pkg/broadcastwriter"
"github.com/docker/docker/pkg/ioutils"
- "github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/runconfig"
)
@@ -112,8 +110,9 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) {
func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) {
- if strings.HasPrefix(d.execDriver.Name(), lxc.DriverName) {
- return "", lxc.ErrExec
+ // Not all drivers support Exec (LXC for example)
+ if err := checkExecSupport(d.execDriver.Name()); err != nil {
+ return "", err
}
container, err := d.getActiveContainer(config.Container)
@@ -129,7 +128,6 @@ func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, erro
Entrypoint: entrypoint,
Arguments: args,
User: config.User,
- Privileged: config.Privileged,
}
execConfig := &execConfig{
@@ -245,72 +243,3 @@ func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pi
return exitStatus, err
}
-
-func (container *Container) GetExecIDs() []string {
- return container.execCommands.List()
-}
-
-func (container *Container) Exec(execConfig *execConfig) error {
- container.Lock()
- defer container.Unlock()
-
- waitStart := make(chan struct{})
-
- callback := func(processConfig *execdriver.ProcessConfig, pid int) {
- if processConfig.Tty {
- // The callback is called after the process Start()
- // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
- // which we close here.
- if c, ok := processConfig.Stdout.(io.Closer); ok {
- c.Close()
- }
- }
- close(waitStart)
- }
-
- // We use a callback here instead of a goroutine and an chan for
- // syncronization purposes
- cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) })
-
- // Exec should not return until the process is actually running
- select {
- case <-waitStart:
- case err := <-cErr:
- return err
- }
-
- return nil
-}
-
-func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error {
- var (
- err error
- exitCode int
- )
-
- pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin)
- exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback)
- if err != nil {
- logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
- }
-
- logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
- if execConfig.OpenStdin {
- if err := execConfig.StreamConfig.stdin.Close(); err != nil {
- logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
- }
- }
- if err := execConfig.StreamConfig.stdout.Clean(); err != nil {
- logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
- }
- if err := execConfig.StreamConfig.stderr.Clean(); err != nil {
- logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
- }
- if execConfig.ProcessConfig.Terminal != nil {
- if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
- logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
- }
- }
-
- return err
-}
diff --git a/daemon/exec_linux.go b/daemon/exec_linux.go
new file mode 100644
index 0000000000..a360326327
--- /dev/null
+++ b/daemon/exec_linux.go
@@ -0,0 +1,18 @@
+// +build linux
+
+package daemon
+
+import (
+ "strings"
+
+ "github.com/docker/docker/daemon/execdriver/lxc"
+)
+
+// checkExecSupport returns an error if the exec driver does not support exec,
+// or nil if it is supported.
+func checkExecSupport(drivername string) error {
+ if strings.HasPrefix(drivername, lxc.DriverName) {
+ return lxc.ErrExec
+ }
+ return nil
+}
diff --git a/daemon/exec_windows.go b/daemon/exec_windows.go
new file mode 100644
index 0000000000..d6f244e6d6
--- /dev/null
+++ b/daemon/exec_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package daemon
+
+// checkExecSupport returns an error if the exec driver does not support exec,
+// or nil if it is supported.
+func checkExecSupport(DriverName string) error {
+ return nil
+}
diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go
index ce196df201..eca77e921e 100644
--- a/daemon/execdriver/driver.go
+++ b/daemon/execdriver/driver.go
@@ -1,21 +1,14 @@
package execdriver
import (
- "encoding/json"
"errors"
"io"
- "io/ioutil"
- "os"
"os/exec"
- "path/filepath"
- "strconv"
- "strings"
"time"
- "github.com/docker/docker/daemon/execdriver/native/template"
+ // TODO Windows: Factor out ulimit
"github.com/docker/docker/pkg/ulimit"
"github.com/docker/libcontainer"
- "github.com/docker/libcontainer/cgroups/fs"
"github.com/docker/libcontainer/configs"
)
@@ -79,6 +72,7 @@ type Network struct {
Interface *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled
Mtu int `json:"mtu"`
ContainerID string `json:"container_id"` // id of the container to join network.
+ NamespacePath string `json:"namespace_path"`
HostNetworking bool `json:"host_networking"`
}
@@ -93,6 +87,11 @@ type Pid struct {
HostPid bool `json:"host_pid"`
}
+// UTS settings of the container
+type UTS struct {
+ HostUTS bool `json:"host_uts"`
+}
+
type NetworkInterface struct {
Gateway string `json:"gateway"`
IPAddress string `json:"ip"`
@@ -103,16 +102,21 @@ type NetworkInterface struct {
LinkLocalIPv6Address string `json:"link_local_ipv6"`
GlobalIPv6PrefixLen int `json:"global_ipv6_prefix_len"`
IPv6Gateway string `json:"ipv6_gateway"`
+ HairpinMode bool `json:"hairpin_mode"`
}
+// TODO Windows: Factor out ulimit.Rlimit
type Resources struct {
- Memory int64 `json:"memory"`
- MemorySwap int64 `json:"memory_swap"`
- CpuShares int64 `json:"cpu_shares"`
- CpusetCpus string `json:"cpuset_cpus"`
- CpusetMems string `json:"cpuset_mems"`
- CpuQuota int64 `json:"cpu_quota"`
- Rlimits []*ulimit.Rlimit `json:"rlimits"`
+ Memory int64 `json:"memory"`
+ MemorySwap int64 `json:"memory_swap"`
+ CpuShares int64 `json:"cpu_shares"`
+ CpusetCpus string `json:"cpuset_cpus"`
+ CpusetMems string `json:"cpuset_mems"`
+ CpuPeriod int64 `json:"cpu_period"`
+ CpuQuota int64 `json:"cpu_quota"`
+ BlkioWeight int64 `json:"blkio_weight"`
+ Rlimits []*ulimit.Rlimit `json:"rlimits"`
+ OomKillDisable bool `json:"oom_kill_disable"`
}
type ResourceStats struct {
@@ -143,6 +147,9 @@ type ProcessConfig struct {
Console string `json:"-"` // dev/console path
}
+// TODO Windows: Factor out unused fields such as LxcConfig, AppArmorProfile,
+// and CgroupParent.
+//
// Process wrapps an os/exec.Cmd to add more metadata
type Command struct {
ID string `json:"id"`
@@ -154,6 +161,7 @@ type Command struct {
Network *Network `json:"network"`
Ipc *Ipc `json:"ipc"`
Pid *Pid `json:"pid"`
+ UTS *UTS `json:"uts"`
Resources *Resources `json:"resources"`
Mounts []Mount `json:"mounts"`
AllowedDevices []*configs.Device `json:"allowed_devices"`
@@ -168,143 +176,3 @@ type Command struct {
AppArmorProfile string `json:"apparmor_profile"`
CgroupParent string `json:"cgroup_parent"` // The parent cgroup for this command.
}
-
-func InitContainer(c *Command) *configs.Config {
- container := template.New()
-
- container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env)
- container.Cgroups.Name = c.ID
- container.Cgroups.AllowedDevices = c.AllowedDevices
- container.Devices = c.AutoCreatedDevices
- container.Rootfs = c.Rootfs
- container.Readonlyfs = c.ReadonlyRootfs
-
- // check to see if we are running in ramdisk to disable pivot root
- container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
-
- // Default parent cgroup is "docker". Override if required.
- if c.CgroupParent != "" {
- container.Cgroups.Parent = c.CgroupParent
- }
- return container
-}
-
-func getEnv(key string, env []string) string {
- for _, pair := range env {
- parts := strings.Split(pair, "=")
- if parts[0] == key {
- return parts[1]
- }
- }
- return ""
-}
-
-func SetupCgroups(container *configs.Config, c *Command) error {
- if c.Resources != nil {
- container.Cgroups.CpuShares = c.Resources.CpuShares
- container.Cgroups.Memory = c.Resources.Memory
- container.Cgroups.MemoryReservation = c.Resources.Memory
- container.Cgroups.MemorySwap = c.Resources.MemorySwap
- container.Cgroups.CpusetCpus = c.Resources.CpusetCpus
- container.Cgroups.CpusetMems = c.Resources.CpusetMems
- container.Cgroups.CpuQuota = c.Resources.CpuQuota
- }
-
- return nil
-}
-
-// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo.
-func getNetworkInterfaceStats(interfaceName string) (*libcontainer.NetworkInterface, error) {
- out := &libcontainer.NetworkInterface{Name: interfaceName}
- // This can happen if the network runtime information is missing - possible if the
- // container was created by an old version of libcontainer.
- if interfaceName == "" {
- return out, nil
- }
- type netStatsPair struct {
- // Where to write the output.
- Out *uint64
- // The network stats file to read.
- File string
- }
- // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
- netStats := []netStatsPair{
- {Out: &out.RxBytes, File: "tx_bytes"},
- {Out: &out.RxPackets, File: "tx_packets"},
- {Out: &out.RxErrors, File: "tx_errors"},
- {Out: &out.RxDropped, File: "tx_dropped"},
-
- {Out: &out.TxBytes, File: "rx_bytes"},
- {Out: &out.TxPackets, File: "rx_packets"},
- {Out: &out.TxErrors, File: "rx_errors"},
- {Out: &out.TxDropped, File: "rx_dropped"},
- }
- for _, netStat := range netStats {
- data, err := readSysfsNetworkStats(interfaceName, netStat.File)
- if err != nil {
- return nil, err
- }
- *(netStat.Out) = data
- }
- return out, nil
-}
-
-// Reads the specified statistics available under /sys/class/net//statistics
-func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) {
- data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile))
- if err != nil {
- return 0, err
- }
- return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
-}
-
-func Stats(containerDir string, containerMemoryLimit int64, machineMemory int64) (*ResourceStats, error) {
- f, err := os.Open(filepath.Join(containerDir, "state.json"))
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- type network struct {
- Type string
- HostInterfaceName string
- }
-
- state := struct {
- CgroupPaths map[string]string `json:"cgroup_paths"`
- Networks []network
- }{}
-
- if err := json.NewDecoder(f).Decode(&state); err != nil {
- return nil, err
- }
- now := time.Now()
-
- mgr := fs.Manager{Paths: state.CgroupPaths}
- cstats, err := mgr.GetStats()
- if err != nil {
- return nil, err
- }
- stats := &libcontainer.Stats{CgroupStats: cstats}
- // if the container does not have any memory limit specified set the
- // limit to the machines memory
- memoryLimit := containerMemoryLimit
- if memoryLimit == 0 {
- memoryLimit = machineMemory
- }
- for _, iface := range state.Networks {
- switch iface.Type {
- case "veth":
- istats, err := getNetworkInterfaceStats(iface.HostInterfaceName)
- if err != nil {
- return nil, err
- }
- stats.Interfaces = append(stats.Interfaces, istats)
- }
- }
- return &ResourceStats{
- Stats: stats,
- Read: now,
- MemoryLimit: memoryLimit,
- }, nil
-}
diff --git a/daemon/execdriver/driver_linux.go b/daemon/execdriver/driver_linux.go
new file mode 100644
index 0000000000..63d043e851
--- /dev/null
+++ b/daemon/execdriver/driver_linux.go
@@ -0,0 +1,159 @@
+package execdriver
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/daemon/execdriver/native/template"
+ "github.com/docker/libcontainer"
+ "github.com/docker/libcontainer/cgroups/fs"
+ "github.com/docker/libcontainer/configs"
+)
+
+func InitContainer(c *Command) *configs.Config {
+ container := template.New()
+
+ container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env)
+ container.Cgroups.Name = c.ID
+ container.Cgroups.AllowedDevices = c.AllowedDevices
+ container.Devices = c.AutoCreatedDevices
+ container.Rootfs = c.Rootfs
+ container.Readonlyfs = c.ReadonlyRootfs
+
+ // check to see if we are running in ramdisk to disable pivot root
+ container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
+
+ // Default parent cgroup is "docker". Override if required.
+ if c.CgroupParent != "" {
+ container.Cgroups.Parent = c.CgroupParent
+ }
+ return container
+}
+
+func getEnv(key string, env []string) string {
+ for _, pair := range env {
+ parts := strings.Split(pair, "=")
+ if parts[0] == key {
+ return parts[1]
+ }
+ }
+ return ""
+}
+
+func SetupCgroups(container *configs.Config, c *Command) error {
+ if c.Resources != nil {
+ container.Cgroups.CpuShares = c.Resources.CpuShares
+ container.Cgroups.Memory = c.Resources.Memory
+ container.Cgroups.MemoryReservation = c.Resources.Memory
+ container.Cgroups.MemorySwap = c.Resources.MemorySwap
+ container.Cgroups.CpusetCpus = c.Resources.CpusetCpus
+ container.Cgroups.CpusetMems = c.Resources.CpusetMems
+ container.Cgroups.CpuPeriod = c.Resources.CpuPeriod
+ container.Cgroups.CpuQuota = c.Resources.CpuQuota
+ container.Cgroups.BlkioWeight = c.Resources.BlkioWeight
+ container.Cgroups.OomKillDisable = c.Resources.OomKillDisable
+ }
+
+ return nil
+}
+
+// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo.
+func getNetworkInterfaceStats(interfaceName string) (*libcontainer.NetworkInterface, error) {
+ out := &libcontainer.NetworkInterface{Name: interfaceName}
+ // This can happen if the network runtime information is missing - possible if the
+ // container was created by an old version of libcontainer.
+ if interfaceName == "" {
+ return out, nil
+ }
+ type netStatsPair struct {
+ // Where to write the output.
+ Out *uint64
+ // The network stats file to read.
+ File string
+ }
+ // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
+ netStats := []netStatsPair{
+ {Out: &out.RxBytes, File: "tx_bytes"},
+ {Out: &out.RxPackets, File: "tx_packets"},
+ {Out: &out.RxErrors, File: "tx_errors"},
+ {Out: &out.RxDropped, File: "tx_dropped"},
+
+ {Out: &out.TxBytes, File: "rx_bytes"},
+ {Out: &out.TxPackets, File: "rx_packets"},
+ {Out: &out.TxErrors, File: "rx_errors"},
+ {Out: &out.TxDropped, File: "rx_dropped"},
+ }
+ for _, netStat := range netStats {
+ data, err := readSysfsNetworkStats(interfaceName, netStat.File)
+ if err != nil {
+ return nil, err
+ }
+ *(netStat.Out) = data
+ }
+ return out, nil
+}
+
+// Reads the specified statistics file available under /sys/class/net/<interface>/statistics
+func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) {
+ data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile))
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+func Stats(containerDir string, containerMemoryLimit int64, machineMemory int64) (*ResourceStats, error) {
+ f, err := os.Open(filepath.Join(containerDir, "state.json"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ type network struct {
+ Type string
+ HostInterfaceName string
+ }
+
+ state := struct {
+ CgroupPaths map[string]string `json:"cgroup_paths"`
+ Networks []network
+ }{}
+
+ if err := json.NewDecoder(f).Decode(&state); err != nil {
+ return nil, err
+ }
+ now := time.Now()
+
+ mgr := fs.Manager{Paths: state.CgroupPaths}
+ cstats, err := mgr.GetStats()
+ if err != nil {
+ return nil, err
+ }
+ stats := &libcontainer.Stats{CgroupStats: cstats}
+ // If the container does not have a memory limit specified, set the
+ // limit to the machine's memory.
+ memoryLimit := containerMemoryLimit
+ if memoryLimit == 0 {
+ memoryLimit = machineMemory
+ }
+ for _, iface := range state.Networks {
+ switch iface.Type {
+ case "veth":
+ istats, err := getNetworkInterfaceStats(iface.HostInterfaceName)
+ if err != nil {
+ return nil, err
+ }
+ stats.Interfaces = append(stats.Interfaces, istats)
+ }
+ }
+ return &ResourceStats{
+ Stats: stats,
+ Read: now,
+ MemoryLimit: memoryLimit,
+ }, nil
+}
diff --git a/daemon/execdriver/execdrivers/execdrivers.go b/daemon/execdriver/execdrivers/execdrivers_linux.go
similarity index 97%
rename from daemon/execdriver/execdrivers/execdrivers.go
rename to daemon/execdriver/execdrivers/execdrivers_linux.go
index dde0be1f0f..89dedc762e 100644
--- a/daemon/execdriver/execdrivers/execdrivers.go
+++ b/daemon/execdriver/execdrivers/execdrivers_linux.go
@@ -1,3 +1,5 @@
+// +build linux
+
package execdrivers
import (
diff --git a/daemon/execdriver/execdrivers/execdrivers_windows.go b/daemon/execdriver/execdrivers/execdrivers_windows.go
new file mode 100644
index 0000000000..aca21ead46
--- /dev/null
+++ b/daemon/execdriver/execdrivers/execdrivers_windows.go
@@ -0,0 +1,19 @@
+// +build windows
+
+package execdrivers
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/daemon/execdriver"
+ "github.com/docker/docker/daemon/execdriver/windows"
+ "github.com/docker/docker/pkg/sysinfo"
+)
+
+func NewDriver(name string, options []string, root, libPath, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) {
+ switch name {
+ case "windows":
+ return windows.NewDriver(root, initPath)
+ }
+ return nil, fmt.Errorf("unknown exec driver %s", name)
+}
diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go
index 1637bc2c69..4b5730a3f4 100644
--- a/daemon/execdriver/lxc/driver.go
+++ b/daemon/execdriver/lxc/driver.go
@@ -1,3 +1,5 @@
+// +build linux
+
package lxc
import (
@@ -10,6 +12,7 @@ import (
"os/exec"
"path"
"path/filepath"
+ "runtime"
"strconv"
"strings"
"sync"
@@ -28,6 +31,7 @@ import (
"github.com/docker/libcontainer/system"
"github.com/docker/libcontainer/user"
"github.com/kr/pty"
+ "github.com/vishvananda/netns"
)
const DriverName = "lxc"
@@ -78,6 +82,41 @@ func (d *driver) Name() string {
return fmt.Sprintf("%s-%s", DriverName, version)
}
+func setupNetNs(nsPath string) (*os.Process, error) {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ origns, err := netns.Get()
+ if err != nil {
+ return nil, err
+ }
+ defer origns.Close()
+
+ f, err := os.OpenFile(nsPath, os.O_RDONLY, 0)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get network namespace %q: %v", nsPath, err)
+ }
+ defer f.Close()
+
+ nsFD := f.Fd()
+ if err := netns.Set(netns.NsHandle(nsFD)); err != nil {
+ return nil, fmt.Errorf("failed to set network namespace %q: %v", nsPath, err)
+ }
+ defer netns.Set(origns)
+
+ cmd := exec.Command("/bin/sh", "-c", "while true; do sleep 1; done")
+ if err := cmd.Start(); err != nil {
+ return nil, fmt.Errorf("failed to start netns process: %v", err)
+ }
+
+ return cmd.Process, nil
+}
+
+func killNetNsProc(proc *os.Process) {
+ proc.Kill()
+ proc.Wait()
+}
+
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
var (
term execdriver.Terminal
@@ -85,16 +124,25 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
dataPath = d.containerDir(c.ID)
)
+ if c.Network.NamespacePath == "" && c.Network.ContainerID == "" {
+ return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("empty namespace path for non-container network")
+ }
+
+ container, err := d.createContainer(c)
+ if err != nil {
+ return execdriver.ExitStatus{ExitCode: -1}, err
+ }
+
if c.ProcessConfig.Tty {
term, err = NewTtyConsole(&c.ProcessConfig, pipes)
} else {
term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
}
- c.ProcessConfig.Terminal = term
- container, err := d.createContainer(c)
if err != nil {
return execdriver.ExitStatus{ExitCode: -1}, err
}
+ c.ProcessConfig.Terminal = term
+
d.Lock()
d.activeContainers[c.ID] = &activeContainer{
container: container,
@@ -120,6 +168,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
"lxc-start",
"-n", c.ID,
"-f", configPath,
+ "-q",
}
// From lxc>=1.1 the default behavior is to daemonize containers after start
@@ -128,10 +177,20 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
params = append(params, "-F")
}
+ proc := &os.Process{}
if c.Network.ContainerID != "" {
params = append(params,
"--share-net", c.Network.ContainerID,
)
+ } else {
+ proc, err = setupNetNs(c.Network.NamespacePath)
+ if err != nil {
+ return execdriver.ExitStatus{ExitCode: -1}, err
+ }
+
+ pidStr := fmt.Sprintf("%d", proc.Pid)
+ params = append(params,
+ "--share-net", pidStr)
}
if c.Ipc != nil {
if c.Ipc.ContainerID != "" {
@@ -149,15 +208,6 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
"--",
c.InitPath,
)
- if c.Network.Interface != nil {
- params = append(params,
- "-g", c.Network.Interface.Gateway,
- "-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
- )
- }
- params = append(params,
- "-mtu", strconv.Itoa(c.Network.Mtu),
- )
if c.ProcessConfig.User != "" {
params = append(params, "-u", c.ProcessConfig.User)
@@ -206,10 +256,12 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
c.ProcessConfig.Args = append([]string{name}, arg...)
if err := createDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
+ killNetNsProc(proc)
return execdriver.ExitStatus{ExitCode: -1}, err
}
if err := c.ProcessConfig.Start(); err != nil {
+ killNetNsProc(proc)
return execdriver.ExitStatus{ExitCode: -1}, err
}
@@ -237,8 +289,10 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
// Poll lxc for RUNNING status
pid, err := d.waitForStart(c, waitLock)
if err != nil {
+ killNetNsProc(proc)
return terminate(err)
}
+ killNetNsProc(proc)
cgroupPaths, err := cgroupPaths(c.ID)
if err != nil {
@@ -271,19 +325,20 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
oomKillNotification, err := notifyOnOOM(cgroupPaths)
<-waitLock
+ exitCode := getExitCode(c)
if err == nil {
_, oomKill = <-oomKillNotification
- logrus.Debugf("oomKill error %s waitErr %s", oomKill, waitErr)
+ logrus.Debugf("oomKill error: %v, waitErr: %v", oomKill, waitErr)
} else {
logrus.Warnf("Your kernel does not support OOM notifications: %s", err)
}
// check oom error
- exitCode := getExitCode(c)
if oomKill {
exitCode = 137
}
+
return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: oomKill}, waitErr
}
@@ -461,7 +516,11 @@ func getExitCode(c *execdriver.Command) int {
}
func (d *driver) Kill(c *execdriver.Command, sig int) error {
- return KillLxc(c.ID, sig)
+ if sig == 9 || c.ProcessConfig.Process == nil {
+ return KillLxc(c.ID, sig)
+ }
+
+ return c.ProcessConfig.Process.Signal(syscall.Signal(sig))
}
func (d *driver) Pause(c *execdriver.Command) error {
@@ -521,7 +580,8 @@ func KillLxc(id string, sig int) error {
if err == nil {
output, err = exec.Command("lxc-kill", "-n", id, strconv.Itoa(sig)).CombinedOutput()
} else {
- output, err = exec.Command("lxc-stop", "-k", "-n", id, strconv.Itoa(sig)).CombinedOutput()
+ // lxc-stop does not take arbitrary signals like lxc-kill does
+ output, err = exec.Command("lxc-stop", "-k", "-n", id).CombinedOutput()
}
if err != nil {
return fmt.Errorf("Err: %s Output: %s", err, output)
diff --git a/daemon/execdriver/lxc/info.go b/daemon/execdriver/lxc/info.go
index 27b4c58604..279211f324 100644
--- a/daemon/execdriver/lxc/info.go
+++ b/daemon/execdriver/lxc/info.go
@@ -1,3 +1,5 @@
+// +build linux
+
package lxc
import (
diff --git a/daemon/execdriver/lxc/info_test.go b/daemon/execdriver/lxc/info_test.go
index edafc02511..996d56b2a3 100644
--- a/daemon/execdriver/lxc/info_test.go
+++ b/daemon/execdriver/lxc/info_test.go
@@ -1,3 +1,5 @@
+// +build linux
+
package lxc
import (
diff --git a/daemon/execdriver/lxc/init.go b/daemon/execdriver/lxc/init.go
index eca1c02e21..a47ece97fd 100644
--- a/daemon/execdriver/lxc/init.go
+++ b/daemon/execdriver/lxc/init.go
@@ -1,3 +1,5 @@
+// +build linux
+
package lxc
import (
diff --git a/daemon/execdriver/lxc/lxc_init_linux.go b/daemon/execdriver/lxc/lxc_init_linux.go
index e7bc2b5f3a..fb89ac6a0a 100644
--- a/daemon/execdriver/lxc/lxc_init_linux.go
+++ b/daemon/execdriver/lxc/lxc_init_linux.go
@@ -1,3 +1,5 @@
+// +build linux
+
package lxc
import (
diff --git a/daemon/execdriver/lxc/lxc_init_unsupported.go b/daemon/execdriver/lxc/lxc_init_unsupported.go
index 97bc8a984c..3b7be139bb 100644
--- a/daemon/execdriver/lxc/lxc_init_unsupported.go
+++ b/daemon/execdriver/lxc/lxc_init_unsupported.go
@@ -3,5 +3,5 @@
package lxc
func finalizeNamespace(args *InitArgs) error {
- panic("Not supported on darwin")
+ panic("Not supported on this platform")
}
diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go
index b3be7f8c51..6bb50e6ab1 100644
--- a/daemon/execdriver/lxc/lxc_template.go
+++ b/daemon/execdriver/lxc/lxc_template.go
@@ -1,3 +1,5 @@
+// +build linux
+
package lxc
import (
@@ -14,22 +16,7 @@ import (
)
const LxcTemplate = `
-{{if .Network.Interface}}
-# network configuration
-lxc.network.type = veth
-lxc.network.link = {{.Network.Interface.Bridge}}
-lxc.network.name = eth0
-lxc.network.mtu = {{.Network.Mtu}}
-lxc.network.flags = up
-{{else if .Network.HostNetworking}}
lxc.network.type = none
-{{else}}
-# network is disabled (-n=false)
-lxc.network.type = empty
-lxc.network.flags = up
-lxc.network.mtu = {{.Network.Mtu}}
-{{end}}
-
# root filesystem
{{$ROOTFS := .Rootfs}}
lxc.rootfs = {{$ROOTFS}}
@@ -107,6 +94,9 @@ lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}}
{{if .Resources.CpuShares}}
lxc.cgroup.cpu.shares = {{.Resources.CpuShares}}
{{end}}
+{{if .Resources.CpuPeriod}}
+lxc.cgroup.cpu.cfs_period_us = {{.Resources.CpuPeriod}}
+{{end}}
{{if .Resources.CpusetCpus}}
lxc.cgroup.cpuset.cpus = {{.Resources.CpusetCpus}}
{{end}}
@@ -116,6 +106,12 @@ lxc.cgroup.cpuset.mems = {{.Resources.CpusetMems}}
{{if .Resources.CpuQuota}}
lxc.cgroup.cpu.cfs_quota_us = {{.Resources.CpuQuota}}
{{end}}
+{{if .Resources.BlkioWeight}}
+lxc.cgroup.blkio.weight = {{.Resources.BlkioWeight}}
+{{end}}
+{{if .Resources.OomKillDisable}}
+lxc.cgroup.memory.oom_control = {{.Resources.OomKillDisable}}
+{{end}}
{{end}}
{{if .LxcConfig}}
@@ -134,6 +130,7 @@ lxc.network.ipv4.gateway = {{.Network.Interface.Gateway}}
{{if .Network.Interface.MacAddress}}
lxc.network.hwaddr = {{.Network.Interface.MacAddress}}
{{end}}
+{{end}}
{{if .ProcessConfig.Env}}
lxc.utsname = {{getHostname .ProcessConfig.Env}}
{{end}}
@@ -153,7 +150,6 @@ lxc.cap.drop = {{.}}
{{end}}
{{end}}
{{end}}
-{{end}}
`
var LxcTemplateCompiled *template.Template
diff --git a/daemon/execdriver/lxc/lxc_template_unit_test.go b/daemon/execdriver/lxc/lxc_template_unit_test.go
index fcac6a3e57..904fa120a8 100644
--- a/daemon/execdriver/lxc/lxc_template_unit_test.go
+++ b/daemon/execdriver/lxc/lxc_template_unit_test.go
@@ -264,13 +264,8 @@ func TestCustomLxcConfigMisc(t *testing.T) {
"lxc.cgroup.cpuset.cpus = 0,1",
},
Network: &execdriver.Network{
- Mtu: 1500,
- Interface: &execdriver.NetworkInterface{
- Gateway: "10.10.10.1",
- IPAddress: "10.10.10.10",
- IPPrefixLen: 24,
- Bridge: "docker0",
- },
+ Mtu: 1500,
+ Interface: nil,
},
ProcessConfig: processConfig,
CapAdd: []string{"net_admin", "syslog"},
@@ -282,13 +277,6 @@ func TestCustomLxcConfigMisc(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- // network
- grepFile(t, p, "lxc.network.type = veth")
- grepFile(t, p, "lxc.network.link = docker0")
- grepFile(t, p, "lxc.network.name = eth0")
- grepFile(t, p, "lxc.network.ipv4 = 10.10.10.10/24")
- grepFile(t, p, "lxc.network.ipv4.gateway = 10.10.10.1")
- grepFile(t, p, "lxc.network.flags = up")
grepFile(t, p, "lxc.aa_profile = lxc-container-default-with-nesting")
// hostname
grepFile(t, p, "lxc.utsname = testhost")
@@ -329,13 +317,8 @@ func TestCustomLxcConfigMiscOverride(t *testing.T) {
"lxc.network.ipv4 = 172.0.0.1",
},
Network: &execdriver.Network{
- Mtu: 1500,
- Interface: &execdriver.NetworkInterface{
- Gateway: "10.10.10.1",
- IPAddress: "10.10.10.10",
- IPPrefixLen: 24,
- Bridge: "docker0",
- },
+ Mtu: 1500,
+ Interface: nil,
},
ProcessConfig: processConfig,
CapAdd: []string{"NET_ADMIN", "SYSLOG"},
@@ -346,13 +329,6 @@ func TestCustomLxcConfigMiscOverride(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- // network
- grepFile(t, p, "lxc.network.type = veth")
- grepFile(t, p, "lxc.network.link = docker0")
- grepFile(t, p, "lxc.network.name = eth0")
- grepFile(t, p, "lxc.network.ipv4 = 172.0.0.1")
- grepFile(t, p, "lxc.network.ipv4.gateway = 10.10.10.1")
- grepFile(t, p, "lxc.network.flags = up")
// hostname
grepFile(t, p, "lxc.utsname = testhost")
diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go
index fa53621c47..1b2d7232d3 100644
--- a/daemon/execdriver/native/create.go
+++ b/daemon/execdriver/native/create.go
@@ -6,12 +6,10 @@ import (
"errors"
"fmt"
"net"
- "path/filepath"
"strings"
"syscall"
"github.com/docker/docker/daemon/execdriver"
- "github.com/docker/docker/pkg/symlink"
"github.com/docker/libcontainer/apparmor"
"github.com/docker/libcontainer/configs"
"github.com/docker/libcontainer/devices"
@@ -31,6 +29,10 @@ func (d *driver) createContainer(c *execdriver.Command) (*configs.Config, error)
return nil, err
}
+ if err := d.createUTS(container, c); err != nil {
+ return nil, err
+ }
+
if err := d.createNetwork(container, c); err != nil {
return nil, err
}
@@ -65,9 +67,7 @@ func (d *driver) createContainer(c *execdriver.Command) (*configs.Config, error)
return nil, err
}
- if err := d.setupLabels(container, c); err != nil {
- return nil, err
- }
+ d.setupLabels(container, c)
d.setupRlimits(container, c)
return container, nil
}
@@ -89,39 +89,9 @@ func generateIfaceName() (string, error) {
}
func (d *driver) createNetwork(container *configs.Config, c *execdriver.Command) error {
- if c.Network.HostNetworking {
- container.Namespaces.Remove(configs.NEWNET)
+ if c.Network == nil {
return nil
}
-
- container.Networks = []*configs.Network{
- {
- Type: "loopback",
- },
- }
-
- iName, err := generateIfaceName()
- if err != nil {
- return err
- }
- if c.Network.Interface != nil {
- vethNetwork := configs.Network{
- Name: "eth0",
- HostInterfaceName: iName,
- Mtu: c.Network.Mtu,
- Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
- MacAddress: c.Network.Interface.MacAddress,
- Gateway: c.Network.Interface.Gateway,
- Type: "veth",
- Bridge: c.Network.Interface.Bridge,
- }
- if c.Network.Interface.GlobalIPv6Address != "" {
- vethNetwork.IPv6Address = fmt.Sprintf("%s/%d", c.Network.Interface.GlobalIPv6Address, c.Network.Interface.GlobalIPv6PrefixLen)
- vethNetwork.IPv6Gateway = c.Network.Interface.IPv6Gateway
- }
- container.Networks = append(container.Networks, &vethNetwork)
- }
-
if c.Network.ContainerID != "" {
d.Lock()
active := d.activeContainers[c.Network.ContainerID]
@@ -137,8 +107,14 @@ func (d *driver) createNetwork(container *configs.Config, c *execdriver.Command)
}
container.Namespaces.Add(configs.NEWNET, state.NamespacePaths[configs.NEWNET])
+ return nil
}
+ if c.Network.NamespacePath == "" {
+ return fmt.Errorf("network namespace path is empty")
+ }
+
+ container.Namespaces.Add(configs.NEWNET, c.Network.NamespacePath)
return nil
}
@@ -176,6 +152,16 @@ func (d *driver) createPid(container *configs.Config, c *execdriver.Command) err
return nil
}
+func (d *driver) createUTS(container *configs.Config, c *execdriver.Command) error {
+ if c.UTS.HostUTS {
+ container.Namespaces.Remove(configs.NEWUTS)
+ container.Hostname = ""
+ return nil
+ }
+
+ return nil
+}
+
func (d *driver) setPrivileged(container *configs.Config) (err error) {
container.Capabilities = execdriver.GetAllCapabilities()
container.Cgroups.AllowAllDevices = true
@@ -232,10 +218,6 @@ func (d *driver) setupMounts(container *configs.Config, c *execdriver.Command) e
container.Mounts = defaultMounts
for _, m := range c.Mounts {
- dest, err := symlink.FollowSymlinkInScope(filepath.Join(c.Rootfs, m.Destination), c.Rootfs)
- if err != nil {
- return err
- }
flags := syscall.MS_BIND | syscall.MS_REC
if !m.Writable {
flags |= syscall.MS_RDONLY
@@ -243,10 +225,9 @@ func (d *driver) setupMounts(container *configs.Config, c *execdriver.Command) e
if m.Slave {
flags |= syscall.MS_SLAVE
}
-
container.Mounts = append(container.Mounts, &configs.Mount{
Source: m.Source,
- Destination: dest,
+ Destination: m.Destination,
Device: "bind",
Flags: flags,
})
@@ -254,9 +235,7 @@ func (d *driver) setupMounts(container *configs.Config, c *execdriver.Command) e
return nil
}
-func (d *driver) setupLabels(container *configs.Config, c *execdriver.Command) error {
+func (d *driver) setupLabels(container *configs.Config, c *execdriver.Command) {
container.ProcessLabel = c.ProcessLabel
container.MountLabel = c.MountLabel
-
- return nil
}
diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go
index afc3f1e45e..4da3e34382 100644
--- a/daemon/execdriver/native/driver.go
+++ b/daemon/execdriver/native/driver.go
@@ -47,7 +47,7 @@ func NewDriver(root, initPath string, options []string) (*driver, error) {
return nil, err
}
- if err := os.MkdirAll(root, 0700); err != nil {
+ if err := sysinfo.MkdirAll(root, 0700); err != nil {
return nil, err
}
// native driver root is at docker_root/execdriver/native. Put apparmor at docker_root
@@ -245,7 +245,9 @@ func waitInPIDHost(p *libcontainer.Process, c libcontainer.Container) func() (*o
}
func (d *driver) Kill(c *execdriver.Command, sig int) error {
+ d.Lock()
active := d.activeContainers[c.ID]
+ d.Unlock()
if active == nil {
return fmt.Errorf("active container for %s does not exist", c.ID)
}
@@ -257,7 +259,9 @@ func (d *driver) Kill(c *execdriver.Command, sig int) error {
}
func (d *driver) Pause(c *execdriver.Command) error {
+ d.Lock()
active := d.activeContainers[c.ID]
+ d.Unlock()
if active == nil {
return fmt.Errorf("active container for %s does not exist", c.ID)
}
@@ -265,7 +269,9 @@ func (d *driver) Pause(c *execdriver.Command) error {
}
func (d *driver) Unpause(c *execdriver.Command) error {
+ d.Lock()
active := d.activeContainers[c.ID]
+ d.Unlock()
if active == nil {
return fmt.Errorf("active container for %s does not exist", c.ID)
}
@@ -333,7 +339,9 @@ func (d *driver) Clean(id string) error {
}
func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
+ d.Lock()
c := d.activeContainers[id]
+ d.Unlock()
if c == nil {
return nil, execdriver.ErrNotRunning
}
diff --git a/daemon/execdriver/native/exec.go b/daemon/execdriver/native/exec.go
index dd41c0ad1d..a9b0e79384 100644
--- a/daemon/execdriver/native/exec.go
+++ b/daemon/execdriver/native/exec.go
@@ -14,6 +14,7 @@ import (
"github.com/docker/libcontainer/utils"
)
+// TODO(vishh): Add support for running in privileged mode.
func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
active := d.activeContainers[c.ID]
if active == nil {
@@ -27,10 +28,6 @@ func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessCo
User: processConfig.User,
}
- if processConfig.Privileged {
- p.Capabilities = execdriver.GetAllCapabilities()
- }
-
config := active.Config()
if err := setupPipes(&config, processConfig, p, pipes); err != nil {
return -1, err
diff --git a/daemon/execdriver/native/init.go b/daemon/execdriver/native/init.go
index f57d6cddec..2a6cd26dab 100644
--- a/daemon/execdriver/native/init.go
+++ b/daemon/execdriver/native/init.go
@@ -32,7 +32,7 @@ func initializer() {
if err != nil {
fatal(err)
}
- if err := factory.StartInitialization(3); err != nil {
+ if err := factory.StartInitialization(); err != nil {
fatal(err)
}
diff --git a/daemon/execdriver/native/template/default_template.go b/daemon/execdriver/native/template/default_template.go
index 76e3cea787..ecedcfc8cb 100644
--- a/daemon/execdriver/native/template/default_template.go
+++ b/daemon/execdriver/native/template/default_template.go
@@ -82,9 +82,16 @@ func New() *configs.Config {
},
MaskPaths: []string{
"/proc/kcore",
+ "/proc/latency_stats",
+ "/proc/timer_stats",
},
ReadonlyPaths: []string{
- "/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
+ "/proc/asound",
+ "/proc/bus",
+ "/proc/fs",
+ "/proc/irq",
+ "/proc/sys",
+ "/proc/sysrq-trigger",
},
}
diff --git a/daemon/execdriver/utils.go b/daemon/execdriver/utils.go
index 407c4f4fa1..fd5a270552 100644
--- a/daemon/execdriver/utils.go
+++ b/daemon/execdriver/utils.go
@@ -8,44 +8,25 @@ import (
"github.com/syndtr/gocapability/capability"
)
-var capabilityList = Capabilities{
- {Key: "SETPCAP", Value: capability.CAP_SETPCAP},
- {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE},
- {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO},
- {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT},
- {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN},
- {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE},
- {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE},
- {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME},
- {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG},
- {Key: "MKNOD", Value: capability.CAP_MKNOD},
- {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE},
- {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL},
- {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE},
- {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN},
- {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN},
- {Key: "SYSLOG", Value: capability.CAP_SYSLOG},
- {Key: "CHOWN", Value: capability.CAP_CHOWN},
- {Key: "NET_RAW", Value: capability.CAP_NET_RAW},
- {Key: "DAC_OVERRIDE", Value: capability.CAP_DAC_OVERRIDE},
- {Key: "FOWNER", Value: capability.CAP_FOWNER},
- {Key: "DAC_READ_SEARCH", Value: capability.CAP_DAC_READ_SEARCH},
- {Key: "FSETID", Value: capability.CAP_FSETID},
- {Key: "KILL", Value: capability.CAP_KILL},
- {Key: "SETGID", Value: capability.CAP_SETGID},
- {Key: "SETUID", Value: capability.CAP_SETUID},
- {Key: "LINUX_IMMUTABLE", Value: capability.CAP_LINUX_IMMUTABLE},
- {Key: "NET_BIND_SERVICE", Value: capability.CAP_NET_BIND_SERVICE},
- {Key: "NET_BROADCAST", Value: capability.CAP_NET_BROADCAST},
- {Key: "IPC_LOCK", Value: capability.CAP_IPC_LOCK},
- {Key: "IPC_OWNER", Value: capability.CAP_IPC_OWNER},
- {Key: "SYS_CHROOT", Value: capability.CAP_SYS_CHROOT},
- {Key: "SYS_PTRACE", Value: capability.CAP_SYS_PTRACE},
- {Key: "SYS_BOOT", Value: capability.CAP_SYS_BOOT},
- {Key: "LEASE", Value: capability.CAP_LEASE},
- {Key: "SETFCAP", Value: capability.CAP_SETFCAP},
- {Key: "WAKE_ALARM", Value: capability.CAP_WAKE_ALARM},
- {Key: "BLOCK_SUSPEND", Value: capability.CAP_BLOCK_SUSPEND},
+var capabilityList Capabilities
+
+func init() {
+ last := capability.CAP_LAST_CAP
+ // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
+ if last == capability.Cap(63) {
+ last = capability.CAP_BLOCK_SUSPEND
+ }
+ for _, cap := range capability.List() {
+ if cap > last {
+ continue
+ }
+ capabilityList = append(capabilityList,
+ &CapabilityMapping{
+ Key: strings.ToUpper(cap.String()),
+ Value: cap,
+ },
+ )
+ }
}
type (
diff --git a/daemon/execdriver/windows/unsupported.go b/daemon/execdriver/windows/unsupported.go
new file mode 100644
index 0000000000..0a492e1267
--- /dev/null
+++ b/daemon/execdriver/windows/unsupported.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package windows
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/daemon/execdriver"
+)
+
+func NewDriver(root, initPath string) (execdriver.Driver, error) {
+ return nil, fmt.Errorf("Windows driver not supported on non-Windows")
+}
diff --git a/daemon/execdriver/windows/windows.go b/daemon/execdriver/windows/windows.go
new file mode 100644
index 0000000000..9837270235
--- /dev/null
+++ b/daemon/execdriver/windows/windows.go
@@ -0,0 +1,97 @@
+// +build windows
+
+/*
+ This is the Windows driver for containers.
+
+ TODO Windows: It is currently a placeholder to allow compilation of the
+ daemon. Future PRs will have an implementation of this driver.
+*/
+
+package windows
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/daemon/execdriver"
+)
+
+const (
+ DriverName = "Windows"
+ Version = "Placeholder"
+)
+
+type activeContainer struct {
+ command *execdriver.Command
+}
+
+type driver struct {
+ root string
+ initPath string
+}
+
+type info struct {
+ ID string
+ driver *driver
+}
+
+func NewDriver(root, initPath string) (*driver, error) {
+ return &driver{
+ root: root,
+ initPath: initPath,
+ }, nil
+}
+
+func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
+ return execdriver.ExitStatus{ExitCode: 0}, nil
+}
+
+func (d *driver) Terminate(p *execdriver.Command) error {
+ return nil
+}
+
+func (d *driver) Kill(p *execdriver.Command, sig int) error {
+ return nil
+}
+
+func kill(ID string, PID int) error {
+ return nil
+}
+
+func (d *driver) Pause(c *execdriver.Command) error {
+ return fmt.Errorf("Windows: Containers cannot be paused")
+}
+
+func (d *driver) Unpause(c *execdriver.Command) error {
+ return fmt.Errorf("Windows: Containers cannot be paused")
+}
+
+func (i *info) IsRunning() bool {
+ return false
+}
+
+func (d *driver) Info(id string) execdriver.Info {
+ return &info{
+ ID: id,
+ driver: d,
+ }
+}
+
+func (d *driver) Name() string {
+ return fmt.Sprintf("%s Date %s", DriverName, Version)
+}
+
+func (d *driver) GetPidsForContainer(id string) ([]int, error) {
+ return nil, fmt.Errorf("GetPidsForContainer: GetPidsForContainer() not implemented")
+}
+
+func (d *driver) Clean(id string) error {
+ return nil
+}
+
+func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
+ return nil, fmt.Errorf("Windows: Stats not implemented")
+}
+
+func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
+ return 0, nil
+}
diff --git a/daemon/graphdriver/devmapper/README.md b/daemon/graphdriver/devmapper/README.md
index a090b731fa..2b6e5e2e64 100644
--- a/daemon/graphdriver/devmapper/README.md
+++ b/daemon/graphdriver/devmapper/README.md
@@ -252,3 +252,23 @@ Here is the list of supported options:
> Otherwise, set this flag for migrating existing Docker daemons to a
> daemon with a supported environment.
+ * `dm.use_deferred_removal`
+
+ Enables use of deferred device removal if libdm and kernel driver
+ support the mechanism.
+
+  Deferred device removal means that if a device is busy when it is
+  being removed/deactivated, then a deferred removal is scheduled on
+  that device, and the device automatically goes away when its last
+  user exits.
+
+  For example, when a container exits, its associated thin device is
+  removed. If that device has leaked into some other mount namespace
+  and can't be removed now, container exit will still be successful
+  and this option will just schedule the device for deferred removal,
+  without waiting in a loop trying to remove a busy device.
+
+ Example use:
+
+ ``docker -d --storage-opt dm.use_deferred_removal=true``
+
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go
index 42b9d76bed..24805179f8 100644
--- a/daemon/graphdriver/devmapper/deviceset.go
+++ b/daemon/graphdriver/devmapper/deviceset.go
@@ -37,7 +37,9 @@ var (
// We retry device removal so many a times that even error messages
// will fill up console during normal operation. So only log Fatal
// messages by default.
- DMLogLevel int = devicemapper.LogLevelFatal
+ DMLogLevel int = devicemapper.LogLevelFatal
+ DriverDeferredRemovalSupport bool = false
+ EnableDeferredRemoval bool = false
)
const deviceSetMetaFile string = "deviceset-metadata"
@@ -103,6 +105,7 @@ type DeviceSet struct {
thinPoolDevice string
Transaction `json:"-"`
overrideUdevSyncCheck bool
+ deferredRemove bool // use deferred removal
}
type DiskUsage struct {
@@ -112,15 +115,16 @@ type DiskUsage struct {
}
type Status struct {
- PoolName string
- DataFile string // actual block device for data
- DataLoopback string // loopback file, if used
- MetadataFile string // actual block device for metadata
- MetadataLoopback string // loopback file, if used
- Data DiskUsage
- Metadata DiskUsage
- SectorSize uint64
- UdevSyncSupported bool
+ PoolName string
+ DataFile string // actual block device for data
+ DataLoopback string // loopback file, if used
+ MetadataFile string // actual block device for metadata
+ MetadataLoopback string // loopback file, if used
+ Data DiskUsage
+ Metadata DiskUsage
+ SectorSize uint64
+ UdevSyncSupported bool
+ DeferredRemoveEnabled bool
}
type DevStatus struct {
@@ -434,6 +438,12 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, trans
func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error {
logrus.Debugf("activateDeviceIfNeeded(%v)", info.Hash)
+ // Make sure deferred removal on device is canceled, if one was
+ // scheduled.
+ if err := devices.cancelDeferredRemoval(info); err != nil {
+ return fmt.Errorf("Deivce Deferred Removal Cancellation Failed: %s", err)
+ }
+
if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 {
return nil
}
@@ -960,16 +970,67 @@ func (devices *DeviceSet) closeTransaction() error {
return nil
}
+func determineDriverCapabilities(version string) error {
+	/*
+	 * Driver version 4.27.0 and greater support the deferred
+	 * removal feature.
+	 */
+
+ logrus.Debugf("devicemapper: driver version is %s", version)
+
+ versionSplit := strings.Split(version, ".")
+ major, err := strconv.Atoi(versionSplit[0])
+ if err != nil {
+ return graphdriver.ErrNotSupported
+ }
+
+ if major > 4 {
+ DriverDeferredRemovalSupport = true
+ return nil
+ }
+
+ if major < 4 {
+ return nil
+ }
+
+ minor, err := strconv.Atoi(versionSplit[1])
+ if err != nil {
+ return graphdriver.ErrNotSupported
+ }
+
+ /*
+ * If major is 4 and minor is 27, then there is no need to
+ * check for patch level as it can not be less than 0.
+ */
+ if minor >= 27 {
+ DriverDeferredRemovalSupport = true
+ return nil
+ }
+
+ return nil
+}
+
func (devices *DeviceSet) initDevmapper(doInit bool) error {
// give ourselves to libdm as a log handler
devicemapper.LogInit(devices)
- _, err := devicemapper.GetDriverVersion()
+ version, err := devicemapper.GetDriverVersion()
if err != nil {
// Can't even get driver version, assume not supported
return graphdriver.ErrNotSupported
}
+ if err := determineDriverCapabilities(version); err != nil {
+ return graphdriver.ErrNotSupported
+ }
+
+ // If user asked for deferred removal and both library and driver
+ // supports deferred removal use it.
+ if EnableDeferredRemoval && DriverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport == true {
+ logrus.Debugf("devmapper: Deferred removal support enabled.")
+ devices.deferredRemove = true
+ }
+
// https://github.com/docker/docker/issues/4036
if supported := devicemapper.UdevSetSyncSupport(true); !supported {
logrus.Errorf("Udev sync is not supported. This will lead to unexpected behavior, data loss and errors. For more information, see https://docs.docker.com/reference/commandline/cli/#daemon-storage-driver-option")
@@ -1233,12 +1294,20 @@ func (devices *DeviceSet) deactivateDevice(info *DevInfo) error {
if err != nil {
return err
}
- if devinfo.Exists != 0 {
+
+ if devinfo.Exists == 0 {
+ return nil
+ }
+
+ if devices.deferredRemove {
+ if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil {
+ return err
+ }
+ } else {
if err := devices.removeDevice(info.Name()); err != nil {
return err
}
}
-
return nil
}
@@ -1268,6 +1337,45 @@ func (devices *DeviceSet) removeDevice(devname string) error {
return err
}
+func (devices *DeviceSet) cancelDeferredRemoval(info *DevInfo) error {
+ if !devices.deferredRemove {
+ return nil
+ }
+
+ logrus.Debugf("[devmapper] cancelDeferredRemoval START(%s)", info.Name())
+ defer logrus.Debugf("[devmapper] cancelDeferredRemoval END(%s)", info.Name)
+
+ devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
+
+ if devinfo != nil && devinfo.DeferredRemove == 0 {
+ return nil
+ }
+
+ // Cancel deferred remove
+ for i := 0; i < 100; i++ {
+ err = devicemapper.CancelDeferredRemove(info.Name())
+ if err == nil {
+ break
+ }
+
+ if err == devicemapper.ErrEnxio {
+ // Device is probably already gone. Return success.
+ return nil
+ }
+
+ if err != devicemapper.ErrBusy {
+ return err
+ }
+
+ // If we see EBUSY it may be a transient error,
+ // sleep a bit a retry a few times.
+ devices.Unlock()
+ time.Sleep(100 * time.Millisecond)
+ devices.Lock()
+ }
+ return err
+}
+
func (devices *DeviceSet) Shutdown() error {
logrus.Debugf("[deviceset %s] Shutdown()", devices.devicePrefix)
logrus.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
@@ -1366,11 +1474,7 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
options = joinMountOptions(options, devices.mountOptions)
options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
- err = syscall.Mount(info.DevName(), path, fstype, flags, joinMountOptions("discard", options))
- if err != nil && err == syscall.EINVAL {
- err = syscall.Mount(info.DevName(), path, fstype, flags, options)
- }
- if err != nil {
+ if err := syscall.Mount(info.DevName(), path, fstype, flags, options); err != nil {
return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err)
}
@@ -1560,6 +1664,7 @@ func (devices *DeviceSet) Status() *Status {
status.MetadataFile = devices.MetadataDevicePath()
status.MetadataLoopback = devices.metadataLoopFile
status.UdevSyncSupported = devicemapper.UdevSyncSupported()
+ status.DeferredRemoveEnabled = devices.deferredRemove
totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
if err == nil {
@@ -1670,6 +1775,13 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error
if err != nil {
return nil, err
}
+
+ case "dm.use_deferred_removal":
+ EnableDeferredRemoval, err = strconv.ParseBool(val)
+ if err != nil {
+ return nil, err
+ }
+
default:
return nil, fmt.Errorf("Unknown option %s\n", key)
}
diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go
index fad0a0c55d..bdf7f874f9 100644
--- a/daemon/graphdriver/devmapper/driver.go
+++ b/daemon/graphdriver/devmapper/driver.go
@@ -77,6 +77,7 @@ func (d *Driver) Status() [][2]string {
{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))},
{"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))},
{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
+ {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
}
if len(s.DataLoopback) > 0 {
status = append(status, [2]string{"Data loop file", s.DataLoopback})
diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go
index c57dd87136..963acdfbb1 100644
--- a/daemon/graphdriver/driver.go
+++ b/daemon/graphdriver/driver.go
@@ -4,7 +4,7 @@ import (
"errors"
"fmt"
"os"
- "path"
+ "path/filepath"
"strings"
"github.com/Sirupsen/logrus"
@@ -14,59 +14,17 @@ import (
type FsMagic uint32
const (
- FsMagicAufs = FsMagic(0x61756673)
- FsMagicBtrfs = FsMagic(0x9123683E)
- FsMagicCramfs = FsMagic(0x28cd3d45)
- FsMagicExtfs = FsMagic(0x0000EF53)
- FsMagicF2fs = FsMagic(0xF2F52010)
- FsMagicJffs2Fs = FsMagic(0x000072b6)
- FsMagicJfs = FsMagic(0x3153464a)
- FsMagicNfsFs = FsMagic(0x00006969)
- FsMagicRamFs = FsMagic(0x858458f6)
- FsMagicReiserFs = FsMagic(0x52654973)
- FsMagicSmbFs = FsMagic(0x0000517B)
- FsMagicSquashFs = FsMagic(0x73717368)
- FsMagicTmpFs = FsMagic(0x01021994)
FsMagicUnsupported = FsMagic(0x00000000)
- FsMagicXfs = FsMagic(0x58465342)
- FsMagicZfs = FsMagic(0x2fc12fc1)
)
var (
DefaultDriver string
// All registred drivers
drivers map[string]InitFunc
- // Slice of drivers that should be used in an order
- priority = []string{
- "aufs",
- "btrfs",
- "devicemapper",
- "overlay",
- "vfs",
- }
ErrNotSupported = errors.New("driver not supported")
ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
-
- FsNames = map[FsMagic]string{
- FsMagicAufs: "aufs",
- FsMagicBtrfs: "btrfs",
- FsMagicCramfs: "cramfs",
- FsMagicExtfs: "extfs",
- FsMagicF2fs: "f2fs",
- FsMagicJffs2Fs: "jffs2",
- FsMagicJfs: "jfs",
- FsMagicNfsFs: "nfs",
- FsMagicRamFs: "ramfs",
- FsMagicReiserFs: "reiserfs",
- FsMagicSmbFs: "smb",
- FsMagicSquashFs: "squashfs",
- FsMagicTmpFs: "tmpfs",
- FsMagicUnsupported: "unsupported",
- FsMagicXfs: "xfs",
- FsMagicZfs: "zfs",
- }
)
type InitFunc func(root string, options []string) (Driver, error)
@@ -138,7 +96,7 @@ func Register(name string, initFunc InitFunc) error {
func GetDriver(name, home string, options []string) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
- return initFunc(path.Join(home, name), options)
+ return initFunc(filepath.Join(home, name), options)
}
return nil, ErrNotSupported
}
@@ -209,7 +167,7 @@ func New(root string, options []string) (driver Driver, err error) {
func scanPriorDrivers(root string) []string {
priorDrivers := []string{}
for driver := range drivers {
- p := path.Join(root, driver)
+ p := filepath.Join(root, driver)
if _, err := os.Stat(p); err == nil {
priorDrivers = append(priorDrivers, driver)
}
@@ -221,7 +179,7 @@ func checkPriorDriver(name, root string) error {
priorDrivers := []string{}
for _, prior := range scanPriorDrivers(root) {
if prior != name && prior != "vfs" {
- if _, err := os.Stat(path.Join(root, prior)); err == nil {
+ if _, err := os.Stat(filepath.Join(root, prior)); err == nil {
priorDrivers = append(priorDrivers, prior)
}
}
diff --git a/daemon/graphdriver/driver_linux.go b/daemon/graphdriver/driver_linux.go
index acf96d1b40..88d88e2615 100644
--- a/daemon/graphdriver/driver_linux.go
+++ b/daemon/graphdriver/driver_linux.go
@@ -1,13 +1,64 @@
+// +build linux
+
package graphdriver
import (
- "path"
+ "path/filepath"
"syscall"
)
+const (
+ FsMagicAufs = FsMagic(0x61756673)
+ FsMagicBtrfs = FsMagic(0x9123683E)
+ FsMagicCramfs = FsMagic(0x28cd3d45)
+ FsMagicExtfs = FsMagic(0x0000EF53)
+ FsMagicF2fs = FsMagic(0xF2F52010)
+ FsMagicJffs2Fs = FsMagic(0x000072b6)
+ FsMagicJfs = FsMagic(0x3153464a)
+ FsMagicNfsFs = FsMagic(0x00006969)
+ FsMagicRamFs = FsMagic(0x858458f6)
+ FsMagicReiserFs = FsMagic(0x52654973)
+ FsMagicSmbFs = FsMagic(0x0000517B)
+ FsMagicSquashFs = FsMagic(0x73717368)
+ FsMagicTmpFs = FsMagic(0x01021994)
+ FsMagicXfs = FsMagic(0x58465342)
+ FsMagicZfs = FsMagic(0x2fc12fc1)
+)
+
+var (
+ // Slice of drivers that should be used in an order
+ priority = []string{
+ "aufs",
+ "btrfs",
+ "zfs",
+ "devicemapper",
+ "overlay",
+ "vfs",
+ }
+
+ FsNames = map[FsMagic]string{
+ FsMagicAufs: "aufs",
+ FsMagicBtrfs: "btrfs",
+ FsMagicCramfs: "cramfs",
+ FsMagicExtfs: "extfs",
+ FsMagicF2fs: "f2fs",
+ FsMagicJffs2Fs: "jffs2",
+ FsMagicJfs: "jfs",
+ FsMagicNfsFs: "nfs",
+ FsMagicRamFs: "ramfs",
+ FsMagicReiserFs: "reiserfs",
+ FsMagicSmbFs: "smb",
+ FsMagicSquashFs: "squashfs",
+ FsMagicTmpFs: "tmpfs",
+ FsMagicUnsupported: "unsupported",
+ FsMagicXfs: "xfs",
+ FsMagicZfs: "zfs",
+ }
+)
+
func GetFSMagic(rootpath string) (FsMagic, error) {
var buf syscall.Statfs_t
- if err := syscall.Statfs(path.Dir(rootpath), &buf); err != nil {
+ if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil {
return 0, err
}
return FsMagic(buf.Type), nil
diff --git a/daemon/graphdriver/driver_unsupported.go b/daemon/graphdriver/driver_unsupported.go
index 27933b6d66..3f36864878 100644
--- a/daemon/graphdriver/driver_unsupported.go
+++ b/daemon/graphdriver/driver_unsupported.go
@@ -1,7 +1,14 @@
-// +build !linux
+// +build !linux,!windows
package graphdriver
+var (
+ // Slice of drivers that should be used in an order
+ priority = []string{
+ "unsupported",
+ }
+)
+
func GetFSMagic(rootpath string) (FsMagic, error) {
return FsMagicUnsupported, nil
}
diff --git a/daemon/graphdriver/driver_windows.go b/daemon/graphdriver/driver_windows.go
new file mode 100644
index 0000000000..3ba09781d4
--- /dev/null
+++ b/daemon/graphdriver/driver_windows.go
@@ -0,0 +1,26 @@
+package graphdriver
+
+type DiffDiskDriver interface {
+ Driver
+ CopyDiff(id, sourceId string) error
+}
+
+const (
+ FsMagicWindows = FsMagic(0xa1b1830f)
+)
+
+var (
+ // Slice of drivers that should be used in an order
+ priority = []string{
+ "windows",
+ }
+
+ FsNames = map[FsMagic]string{
+ FsMagicWindows: "windows",
+ FsMagicUnsupported: "unsupported",
+ }
+)
+
+func GetFSMagic(rootpath string) (FsMagic, error) {
+ return FsMagicWindows, nil
+}
diff --git a/daemon/graphdriver/overlay/copy.go b/daemon/graphdriver/overlay/copy.go
index ae6bee517b..f43b117af4 100644
--- a/daemon/graphdriver/overlay/copy.go
+++ b/daemon/graphdriver/overlay/copy.go
@@ -71,9 +71,12 @@ func copyDir(srcDir, dstDir string, flags CopyFlags) error {
return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
}
+ isHardlink := false
+
switch f.Mode() & os.ModeType {
case 0: // Regular file
if flags&CopyHardlink != 0 {
+ isHardlink = true
if err := os.Link(srcPath, dstPath); err != nil {
return err
}
@@ -114,6 +117,12 @@ func copyDir(srcDir, dstDir string, flags CopyFlags) error {
return fmt.Errorf("Unknown file type for %s\n", srcPath)
}
+ // Everything below is copying metadata from src to dst. All this metadata
+ // already shares an inode for hardlinks.
+ if isHardlink {
+ return nil
+ }
+
if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}
diff --git a/daemon/graphdriver/overlay/overlay.go b/daemon/graphdriver/overlay/overlay.go
index 5b0d3b7f53..df6a7dbd93 100644
--- a/daemon/graphdriver/overlay/overlay.go
+++ b/daemon/graphdriver/overlay/overlay.go
@@ -318,6 +318,14 @@ func (d *Driver) Put(id string) error {
mount := d.active[id]
if mount == nil {
logrus.Debugf("Put on a non-mounted device %s", id)
+ // but it might be still here
+ if d.Exists(id) {
+ mergedDir := path.Join(d.dir(id), "merged")
+ err := syscall.Unmount(mergedDir, 0)
+ if err != nil {
+ logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
+ }
+ }
return nil
}
diff --git a/daemon/graphdriver/zfs/MAINTAINERS b/daemon/graphdriver/zfs/MAINTAINERS
new file mode 100644
index 0000000000..9c270c541f
--- /dev/null
+++ b/daemon/graphdriver/zfs/MAINTAINERS
@@ -0,0 +1,2 @@
+Jörg Thalheim (@Mic92)
+Arthur Gautier (@baloose)
diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go
new file mode 100644
index 0000000000..f334f6d29f
--- /dev/null
+++ b/daemon/graphdriver/zfs/zfs.go
@@ -0,0 +1,311 @@
+// +build linux
+
+package zfs
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ log "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/daemon/graphdriver"
+ "github.com/docker/docker/pkg/mount"
+ "github.com/docker/docker/pkg/parsers"
+ zfs "github.com/mistifyio/go-zfs"
+)
+
+type ZfsOptions struct {
+ fsName string
+ mountPath string
+}
+
+func init() {
+ graphdriver.Register("zfs", Init)
+}
+
+type Logger struct{}
+
+func (*Logger) Log(cmd []string) {
+ log.Debugf("[zfs] %s", strings.Join(cmd, " "))
+}
+
+func Init(base string, opt []string) (graphdriver.Driver, error) {
+ var err error
+ options, err := parseOptions(opt)
+ if err != nil {
+ return nil, err
+ }
+ options.mountPath = base
+
+ rootdir := path.Dir(base)
+
+ if options.fsName == "" {
+ err = checkRootdirFs(rootdir)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if _, err := exec.LookPath("zfs"); err != nil {
+ return nil, fmt.Errorf("zfs command is not available: %v", err)
+ }
+
+ file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
+ if err != nil {
+ return nil, fmt.Errorf("cannot open /dev/zfs: %v", err)
+ }
+ defer file.Close()
+
+ if options.fsName == "" {
+ options.fsName, err = lookupZfsDataset(rootdir)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ zfs.SetLogger(new(Logger))
+
+ filesystems, err := zfs.Filesystems(options.fsName)
+ if err != nil {
+ return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err)
+ }
+
+ filesystemsCache := make(map[string]bool, len(filesystems))
+ var rootDataset *zfs.Dataset
+ for _, fs := range filesystems {
+ if fs.Name == options.fsName {
+ rootDataset = fs
+ }
+ filesystemsCache[fs.Name] = true
+ }
+
+ if rootDataset == nil {
+ return nil, fmt.Errorf("BUG: zfs get all -t filesystems -rHp '%s' should contain '%s'", options.fsName, options.fsName)
+ }
+
+ d := &Driver{
+ dataset: rootDataset,
+ options: options,
+ filesystemsCache: filesystemsCache,
+ }
+ return graphdriver.NaiveDiffDriver(d), nil
+}
+
+func parseOptions(opt []string) (ZfsOptions, error) {
+ var options ZfsOptions
+ options.fsName = ""
+ for _, option := range opt {
+ key, val, err := parsers.ParseKeyValueOpt(option)
+ if err != nil {
+ return options, err
+ }
+ key = strings.ToLower(key)
+ switch key {
+ case "zfs.fsname":
+ options.fsName = val
+ default:
+ return options, fmt.Errorf("Unknown option %s", key)
+ }
+ }
+ return options, nil
+}
+
+func checkRootdirFs(rootdir string) error {
+ var buf syscall.Statfs_t
+ if err := syscall.Statfs(rootdir, &buf); err != nil {
+ return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+ }
+
+ if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs {
+ log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+ return graphdriver.ErrPrerequisites
+ }
+ return nil
+}
+
+func lookupZfsDataset(rootdir string) (string, error) {
+ var stat syscall.Stat_t
+ if err := syscall.Stat(rootdir, &stat); err != nil {
+ return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+ }
+ wantedDev := stat.Dev
+
+ mounts, err := mount.GetMounts()
+ if err != nil {
+ return "", err
+ }
+ for _, m := range mounts {
+ if err := syscall.Stat(m.Mountpoint, &stat); err != nil {
+ log.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
+ continue // may fail on fuse file systems
+ }
+
+ if stat.Dev == wantedDev && m.Fstype == "zfs" {
+ return m.Source, nil
+ }
+ }
+
+ return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir)
+}
+
+type Driver struct {
+ dataset *zfs.Dataset
+ options ZfsOptions
+ sync.Mutex // protects filesystem cache against concurrent access
+ filesystemsCache map[string]bool
+}
+
+func (d *Driver) String() string {
+ return "zfs"
+}
+
+func (d *Driver) Cleanup() error {
+ return nil
+}
+
+func (d *Driver) Status() [][2]string {
+ parts := strings.Split(d.dataset.Name, "/")
+ pool, err := zfs.GetZpool(parts[0])
+
+ var poolName, poolHealth string
+ if err == nil {
+ poolName = pool.Name
+ poolHealth = pool.Health
+ } else {
+ poolName = fmt.Sprintf("error while getting pool information: %v", err)
+ poolHealth = "not available"
+ }
+
+ quota := "no"
+ if d.dataset.Quota != 0 {
+ quota = strconv.FormatUint(d.dataset.Quota, 10)
+ }
+
+ return [][2]string{
+ {"Zpool", poolName},
+ {"Zpool Health", poolHealth},
+ {"Parent Dataset", d.dataset.Name},
+ {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)},
+ {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)},
+ {"Parent Quota", quota},
+ {"Compression", d.dataset.Compression},
+ }
+}
+
+func (d *Driver) cloneFilesystem(name, parentName string) error {
+ snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond())
+ parentDataset := zfs.Dataset{Name: parentName}
+ snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false)
+ if err != nil {
+ return err
+ }
+
+ _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"})
+ if err == nil {
+ d.Lock()
+ d.filesystemsCache[name] = true
+ d.Unlock()
+ }
+
+ if err != nil {
+ snapshot.Destroy(zfs.DestroyDeferDeletion)
+ return err
+ }
+ return snapshot.Destroy(zfs.DestroyDeferDeletion)
+}
+
+func (d *Driver) ZfsPath(id string) string {
+ return d.options.fsName + "/" + id
+}
+
+func (d *Driver) MountPath(id string) string {
+ return path.Join(d.options.mountPath, "graph", id)
+}
+
+func (d *Driver) Create(id string, parent string) error {
+ err := d.create(id, parent)
+ if err == nil {
+ return nil
+ }
+ if zfsError, ok := err.(*zfs.Error); ok {
+ if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") {
+ return err
+ }
+ // aborted build -> cleanup
+ } else {
+ return err
+ }
+
+ dataset := zfs.Dataset{Name: d.ZfsPath(id)}
+ if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil {
+ return err
+ }
+
+ // retry
+ return d.create(id, parent)
+}
+
+func (d *Driver) create(id, parent string) error {
+ name := d.ZfsPath(id)
+ if parent == "" {
+ mountoptions := map[string]string{"mountpoint": "legacy"}
+ fs, err := zfs.CreateFilesystem(name, mountoptions)
+ if err == nil {
+ d.Lock()
+ d.filesystemsCache[fs.Name] = true
+ d.Unlock()
+ }
+ return err
+ }
+ return d.cloneFilesystem(name, d.ZfsPath(parent))
+}
+
+func (d *Driver) Remove(id string) error {
+ name := d.ZfsPath(id)
+ dataset := zfs.Dataset{Name: name}
+ err := dataset.Destroy(zfs.DestroyRecursive)
+ if err == nil {
+ d.Lock()
+ delete(d.filesystemsCache, name)
+ d.Unlock()
+ }
+ return err
+}
+
+func (d *Driver) Get(id, mountLabel string) (string, error) {
+ mountpoint := d.MountPath(id)
+ filesystem := d.ZfsPath(id)
+ log.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, mountLabel)
+
+ // Create the target directories if they don't exist
+ if err := os.MkdirAll(mountpoint, 0755); err != nil && !os.IsExist(err) {
+ return "", err
+ }
+
+ err := mount.Mount(filesystem, mountpoint, "zfs", mountLabel)
+ if err != nil {
+ return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
+ }
+
+ return mountpoint, nil
+}
+
+func (d *Driver) Put(id string) error {
+ mountpoint := d.MountPath(id)
+ log.Debugf(`[zfs] unmount("%s")`, mountpoint)
+
+ if err := mount.Unmount(mountpoint); err != nil {
+ return fmt.Errorf("error unmounting %s: %v", mountpoint, err)
+ }
+ return nil
+}
+
+func (d *Driver) Exists(id string) bool {
+ return d.filesystemsCache[d.ZfsPath(id)]
+}
diff --git a/daemon/graphdriver/zfs/zfs_test.go b/daemon/graphdriver/zfs/zfs_test.go
new file mode 100644
index 0000000000..c20eb986aa
--- /dev/null
+++ b/daemon/graphdriver/zfs/zfs_test.go
@@ -0,0 +1,30 @@
+// +build linux
+
+package zfs
+
+import (
+ "github.com/docker/docker/daemon/graphdriver/graphtest"
+ "testing"
+)
+
+// This avoids creating a new driver for each test if all tests are run
+// Make sure to put new tests between TestZfsSetup and TestZfsTeardown
+func TestZfsSetup(t *testing.T) {
+ graphtest.GetDriver(t, "zfs")
+}
+
+func TestZfsCreateEmpty(t *testing.T) {
+ graphtest.DriverTestCreateEmpty(t, "zfs")
+}
+
+func TestZfsCreateBase(t *testing.T) {
+ graphtest.DriverTestCreateBase(t, "zfs")
+}
+
+func TestZfsCreateSnap(t *testing.T) {
+ graphtest.DriverTestCreateSnap(t, "zfs")
+}
+
+func TestZfsTeardown(t *testing.T) {
+ graphtest.PutDriver(t)
+}
diff --git a/daemon/graphdriver/zfs/zfs_unsupported.go b/daemon/graphdriver/zfs/zfs_unsupported.go
new file mode 100644
index 0000000000..a30a0f68e8
--- /dev/null
+++ b/daemon/graphdriver/zfs/zfs_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package zfs
diff --git a/daemon/info.go b/daemon/info.go
index df1c0530cc..edec5f9ff7 100644
--- a/daemon/info.go
+++ b/daemon/info.go
@@ -64,10 +64,12 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
DriverStatus: daemon.GraphDriver().Status(),
MemoryLimit: daemon.SystemConfig().MemoryLimit,
SwapLimit: daemon.SystemConfig().SwapLimit,
+ CpuCfsPeriod: daemon.SystemConfig().CpuCfsPeriod,
CpuCfsQuota: daemon.SystemConfig().CpuCfsQuota,
IPv4Forwarding: !daemon.SystemConfig().IPv4ForwardingDisabled,
Debug: os.Getenv("DEBUG") != "",
NFd: fileutils.GetTotalUsedFds(),
+ OomKillDisable: daemon.SystemConfig().OomKillDisable,
NGoroutines: runtime.NumGoroutine(),
SystemTime: time.Now().Format(time.RFC3339Nano),
ExecutionDriver: daemon.ExecutionDriver().Name(),
@@ -83,6 +85,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
MemTotal: meminfo.MemTotal,
DockerRootDir: daemon.Config().Root,
Labels: daemon.Config().Labels,
+ ExperimentalBuild: utils.ExperimentalBuild(),
}
if httpProxy := os.Getenv("http_proxy"); httpProxy != "" {
diff --git a/daemon/inspect.go b/daemon/inspect.go
index 56db3d059b..146bd77e9e 100644
--- a/daemon/inspect.go
+++ b/daemon/inspect.go
@@ -10,18 +10,10 @@ import (
type ContainerJSONRaw struct {
*Container
HostConfig *runconfig.HostConfig
-}
-func (daemon *Daemon) ContainerInspectRaw(name string) (*ContainerJSONRaw, error) {
- container, err := daemon.Get(name)
- if err != nil {
- return nil, err
- }
-
- container.Lock()
- defer container.Unlock()
-
- return &ContainerJSONRaw{container, container.hostConfig}, nil
+ // Unused fields for backward compatibility with API versions < 1.12.
+ Volumes map[string]string
+ VolumesRW map[string]bool
}
func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) {
@@ -60,6 +52,14 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
FinishedAt: container.State.FinishedAt,
}
+ volumes := make(map[string]string)
+ volumesRW := make(map[string]bool)
+
+ for _, m := range container.MountPoints {
+ volumes[m.Destination] = m.Path()
+ volumesRW[m.Destination] = m.RW
+ }
+
contJSON := &types.ContainerJSON{
Id: container.ID,
Created: container.Created,
@@ -79,8 +79,8 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
ExecDriver: container.ExecDriver,
MountLabel: container.MountLabel,
ProcessLabel: container.ProcessLabel,
- Volumes: container.Volumes,
- VolumesRW: container.VolumesRW,
+ Volumes: volumes,
+ VolumesRW: volumesRW,
AppArmorProfile: container.AppArmorProfile,
ExecIDs: container.GetExecIDs(),
HostConfig: &hostConfig,
diff --git a/daemon/logdrivers_linux.go b/daemon/logdrivers_linux.go
new file mode 100644
index 0000000000..e59345f7d4
--- /dev/null
+++ b/daemon/logdrivers_linux.go
@@ -0,0 +1,9 @@
+package daemon
+
+// Importing packages here only to make sure their init gets called and
+// therefore they register themselves to the logdriver factory.
+import (
+ _ "github.com/docker/docker/daemon/logger/journald"
+ _ "github.com/docker/docker/daemon/logger/jsonfilelog"
+ _ "github.com/docker/docker/daemon/logger/syslog"
+)
diff --git a/daemon/logdrivers_windows.go b/daemon/logdrivers_windows.go
new file mode 100644
index 0000000000..5dcbe718c6
--- /dev/null
+++ b/daemon/logdrivers_windows.go
@@ -0,0 +1,7 @@
+package daemon
+
+// Importing packages here only to make sure their init gets called and
+// therefore they register themselves to the logdriver factory.
+import (
+ _ "github.com/docker/docker/daemon/logger/jsonfilelog"
+)
diff --git a/daemon/logger/copier_test.go b/daemon/logger/copier_test.go
index 45f76ac8e8..54e60ee49c 100644
--- a/daemon/logger/copier_test.go
+++ b/daemon/logger/copier_test.go
@@ -3,6 +3,7 @@ package logger
import (
"bytes"
"encoding/json"
+ "errors"
"io"
"testing"
"time"
@@ -12,16 +13,14 @@ type TestLoggerJSON struct {
*json.Encoder
}
-func (l *TestLoggerJSON) Log(m *Message) error {
- return l.Encode(m)
-}
+func (l *TestLoggerJSON) Log(m *Message) error { return l.Encode(m) }
-func (l *TestLoggerJSON) Close() error {
- return nil
-}
+func (l *TestLoggerJSON) Close() error { return nil }
-func (l *TestLoggerJSON) Name() string {
- return "json"
+func (l *TestLoggerJSON) Name() string { return "json" }
+
+func (l *TestLoggerJSON) GetReader() (io.Reader, error) {
+ return nil, errors.New("not used in the test")
}
type TestLoggerText struct {
@@ -33,12 +32,12 @@ func (l *TestLoggerText) Log(m *Message) error {
return err
}
-func (l *TestLoggerText) Close() error {
- return nil
-}
+func (l *TestLoggerText) Close() error { return nil }
-func (l *TestLoggerText) Name() string {
- return "text"
+func (l *TestLoggerText) Name() string { return "text" }
+
+func (l *TestLoggerText) GetReader() (io.Reader, error) {
+ return nil, errors.New("not used in the test")
}
func TestCopier(t *testing.T) {
diff --git a/daemon/logger/factory.go b/daemon/logger/factory.go
new file mode 100644
index 0000000000..80234ee4a1
--- /dev/null
+++ b/daemon/logger/factory.go
@@ -0,0 +1,57 @@
+package logger
+
+import (
+ "fmt"
+ "sync"
+)
+
+// Creator is a method that builds a logging driver instance with given context
+type Creator func(Context) (Logger, error)
+
+// Context provides enough information for a logging driver to do its function
+type Context struct {
+ Config map[string]string
+ ContainerID string
+ ContainerName string
+ LogPath string
+}
+
+type logdriverFactory struct {
+ registry map[string]Creator
+ m sync.Mutex
+}
+
+func (lf *logdriverFactory) register(name string, c Creator) error {
+ lf.m.Lock()
+ defer lf.m.Unlock()
+
+ if _, ok := lf.registry[name]; ok {
+ return fmt.Errorf("logger: log driver named '%s' is already registered", name)
+ }
+ lf.registry[name] = c
+ return nil
+}
+
+func (lf *logdriverFactory) get(name string) (Creator, error) {
+ lf.m.Lock()
+ defer lf.m.Unlock()
+
+ c, ok := lf.registry[name]
+ if !ok {
+ return c, fmt.Errorf("logger: no log driver named '%s' is registered", name)
+ }
+ return c, nil
+}
+
+var factory = &logdriverFactory{registry: make(map[string]Creator)} // global factory instance
+
+// RegisterLogDriver registers the given logging driver builder with given logging
+// driver name.
+func RegisterLogDriver(name string, c Creator) error {
+ return factory.register(name, c)
+}
+
+// GetLogDriver provides the logging driver builder for a logging driver name.
+func GetLogDriver(name string) (Creator, error) {
+ return factory.get(name)
+}
diff --git a/daemon/logger/journald/journald.go b/daemon/logger/journald/journald.go
index 5eb141ac83..7aa28f0d08 100644
--- a/daemon/logger/journald/journald.go
+++ b/daemon/logger/journald/journald.go
@@ -1,21 +1,42 @@
+// +build linux
+
package journald
import (
"fmt"
+ "io"
+ "github.com/Sirupsen/logrus"
"github.com/coreos/go-systemd/journal"
"github.com/docker/docker/daemon/logger"
)
+const name = "journald"
+
type Journald struct {
Jmap map[string]string
}
-func New(id string) (logger.Logger, error) {
+func init() {
+ if err := logger.RegisterLogDriver(name, New); err != nil {
+ logrus.Fatal(err)
+ }
+}
+
+func New(ctx logger.Context) (logger.Logger, error) {
if !journal.Enabled() {
return nil, fmt.Errorf("journald is not enabled on this host")
}
- jmap := map[string]string{"MESSAGE_ID": id}
+ // Strip a leading slash so that people can search for
+ // CONTAINER_NAME=foo rather than CONTAINER_NAME=/foo.
+ name := ctx.ContainerName
+ if name[0] == '/' {
+ name = name[1:]
+ }
+ jmap := map[string]string{
+ "CONTAINER_ID": ctx.ContainerID[:12],
+ "CONTAINER_ID_FULL": ctx.ContainerID,
+ "CONTAINER_NAME": name}
return &Journald{Jmap: jmap}, nil
}
@@ -31,5 +52,9 @@ func (s *Journald) Close() error {
}
func (s *Journald) Name() string {
- return "Journald"
+ return name
+}
+
+func (s *Journald) GetReader() (io.Reader, error) {
+ return nil, logger.ReadLogsNotSupported
}
diff --git a/daemon/logger/journald/journald_unsupported.go b/daemon/logger/journald/journald_unsupported.go
new file mode 100644
index 0000000000..110833c23b
--- /dev/null
+++ b/daemon/logger/journald/journald_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package journald
diff --git a/daemon/logger/jsonfilelog/jsonfilelog.go b/daemon/logger/jsonfilelog/jsonfilelog.go
index 50293181fd..3931e270c0 100644
--- a/daemon/logger/jsonfilelog/jsonfilelog.go
+++ b/daemon/logger/jsonfilelog/jsonfilelog.go
@@ -2,31 +2,46 @@ package jsonfilelog
import (
"bytes"
+ "io"
"os"
"sync"
+ "github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/pkg/jsonlog"
"github.com/docker/docker/pkg/timeutils"
)
+const (
+ Name = "json-file"
+)
+
// JSONFileLogger is Logger implementation for default docker logging:
// JSON objects to file
type JSONFileLogger struct {
buf *bytes.Buffer
f *os.File // store for closing
mu sync.Mutex // protects buffer
+
+ ctx logger.Context
+}
+
+func init() {
+ if err := logger.RegisterLogDriver(Name, New); err != nil {
+ logrus.Fatal(err)
+ }
}
// New creates new JSONFileLogger which writes to filename
-func New(filename string) (logger.Logger, error) {
- log, err := os.OpenFile(filename, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
+func New(ctx logger.Context) (logger.Logger, error) {
+ log, err := os.OpenFile(ctx.LogPath, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
if err != nil {
return nil, err
}
return &JSONFileLogger{
f: log,
buf: bytes.NewBuffer(nil),
+ ctx: ctx,
}, nil
}
@@ -34,6 +49,7 @@ func New(filename string) (logger.Logger, error) {
func (l *JSONFileLogger) Log(msg *logger.Message) error {
l.mu.Lock()
defer l.mu.Unlock()
+
timestamp, err := timeutils.FastMarshalJSON(msg.Timestamp)
if err != nil {
return err
@@ -52,6 +68,14 @@ func (l *JSONFileLogger) Log(msg *logger.Message) error {
return nil
}
+func (l *JSONFileLogger) GetReader() (io.Reader, error) {
+ return os.Open(l.ctx.LogPath)
+}
+
+func (l *JSONFileLogger) LogPath() string {
+ return l.ctx.LogPath
+}
+
// Close closes underlying file
func (l *JSONFileLogger) Close() error {
return l.f.Close()
@@ -59,5 +83,5 @@ func (l *JSONFileLogger) Close() error {
// Name returns name of this logger
func (l *JSONFileLogger) Name() string {
- return "JSONFile"
+ return Name
}
diff --git a/daemon/logger/jsonfilelog/jsonfilelog_test.go b/daemon/logger/jsonfilelog/jsonfilelog_test.go
index e951c1b869..568650b93c 100644
--- a/daemon/logger/jsonfilelog/jsonfilelog_test.go
+++ b/daemon/logger/jsonfilelog/jsonfilelog_test.go
@@ -12,18 +12,22 @@ import (
)
func TestJSONFileLogger(t *testing.T) {
+ cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657"
tmp, err := ioutil.TempDir("", "docker-logger-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
filename := filepath.Join(tmp, "container.log")
- l, err := New(filename)
+ l, err := New(logger.Context{
+ ContainerID: cid,
+ LogPath: filename,
+ })
if err != nil {
t.Fatal(err)
}
defer l.Close()
- cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657"
+
if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line1"), Source: "src1"}); err != nil {
t.Fatal(err)
}
@@ -48,18 +52,22 @@ func TestJSONFileLogger(t *testing.T) {
}
func BenchmarkJSONFileLogger(b *testing.B) {
+ cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657"
tmp, err := ioutil.TempDir("", "docker-logger-")
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll(tmp)
filename := filepath.Join(tmp, "container.log")
- l, err := New(filename)
+ l, err := New(logger.Context{
+ ContainerID: cid,
+ LogPath: filename,
+ })
if err != nil {
b.Fatal(err)
}
defer l.Close()
- cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657"
+
testLine := "Line that thinks that it is log line from docker\n"
msg := &logger.Message{ContainerID: cid, Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()}
jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON()
diff --git a/daemon/logger/logger.go b/daemon/logger/logger.go
index 078e67d8e9..29aafd380b 100644
--- a/daemon/logger/logger.go
+++ b/daemon/logger/logger.go
@@ -1,6 +1,12 @@
package logger
-import "time"
+import (
+ "errors"
+ "io"
+ "time"
+)
+
+var ReadLogsNotSupported = errors.New("configured logging reader does not support reading")
// Message is datastructure that represents record from some container
type Message struct {
@@ -15,4 +21,5 @@ type Logger interface {
Log(*Message) error
Name() string
Close() error
+ GetReader() (io.Reader, error)
}
diff --git a/daemon/logger/syslog/syslog.go b/daemon/logger/syslog/syslog.go
index a250d6e93c..6a6377f833 100644
--- a/daemon/logger/syslog/syslog.go
+++ b/daemon/logger/syslog/syslog.go
@@ -1,23 +1,37 @@
+// +build linux
+
package syslog
import (
"fmt"
+ "io"
"log/syslog"
"os"
"path"
+ "github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/logger"
)
+const name = "syslog"
+
type Syslog struct {
writer *syslog.Writer
}
-func New(tag string) (logger.Logger, error) {
+func init() {
+ if err := logger.RegisterLogDriver(name, New); err != nil {
+ logrus.Fatal(err)
+ }
+}
+
+func New(ctx logger.Context) (logger.Logger, error) {
+ tag := ctx.ContainerID[:12]
log, err := syslog.New(syslog.LOG_DAEMON, fmt.Sprintf("%s/%s", path.Base(os.Args[0]), tag))
if err != nil {
return nil, err
}
+
return &Syslog{
writer: log,
}, nil
@@ -35,5 +49,9 @@ func (s *Syslog) Close() error {
}
func (s *Syslog) Name() string {
- return "Syslog"
+ return name
+}
+
+func (s *Syslog) GetReader() (io.Reader, error) {
+ return nil, logger.ReadLogsNotSupported
}
diff --git a/daemon/logger/syslog/syslog_unsupported.go b/daemon/logger/syslog/syslog_unsupported.go
new file mode 100644
index 0000000000..50cc51b657
--- /dev/null
+++ b/daemon/logger/syslog/syslog_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package syslog
diff --git a/daemon/logs.go b/daemon/logs.go
index 79d4044bbe..d388b9c1b6 100644
--- a/daemon/logs.go
+++ b/daemon/logs.go
@@ -5,11 +5,14 @@ import (
"encoding/json"
"fmt"
"io"
+ "net"
"os"
"strconv"
- "sync"
+ "syscall"
+ "time"
"github.com/Sirupsen/logrus"
+ "github.com/docker/docker/daemon/logger/jsonfilelog"
"github.com/docker/docker/pkg/jsonlog"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/pkg/tailfile"
@@ -19,6 +22,7 @@ import (
type ContainerLogsConfig struct {
Follow, Timestamps bool
Tail string
+ Since time.Time
UseStdout, UseStderr bool
OutStream io.Writer
}
@@ -54,32 +58,15 @@ func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) er
errStream = outStream
}
- if container.LogDriverType() != "json-file" {
+ if container.LogDriverType() != jsonfilelog.Name {
return fmt.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver")
}
- cLog, err := container.ReadLog("json")
- if err != nil && os.IsNotExist(err) {
- // Legacy logs
- logrus.Debugf("Old logs format")
- if config.UseStdout {
- cLog, err := container.ReadLog("stdout")
- if err != nil {
- logrus.Errorf("Error reading logs (stdout): %s", err)
- } else if _, err := io.Copy(outStream, cLog); err != nil {
- logrus.Errorf("Error streaming logs (stdout): %s", err)
- }
- }
- if config.UseStderr {
- cLog, err := container.ReadLog("stderr")
- if err != nil {
- logrus.Errorf("Error reading logs (stderr): %s", err)
- } else if _, err := io.Copy(errStream, cLog); err != nil {
- logrus.Errorf("Error streaming logs (stderr): %s", err)
- }
- }
- } else if err != nil {
- logrus.Errorf("Error reading logs (json): %s", err)
+ logDriver, err := container.getLogger()
+ cLog, err := logDriver.GetReader()
+ if err != nil {
+ logrus.Errorf("Error reading logs: %s", err)
} else {
+ // json-file driver
if config.Tail != "all" {
var err error
lines, err = strconv.Atoi(config.Tail)
@@ -88,6 +75,7 @@ func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) er
lines = -1
}
}
+
if lines != 0 {
if lines > 0 {
f := cLog.(*os.File)
@@ -101,9 +89,11 @@ func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) er
}
cLog = tmp
}
+
dec := json.NewDecoder(cLog)
l := &jsonlog.JSONLog{}
for {
+ l.Reset()
if err := dec.Decode(l); err == io.EOF {
break
} else if err != nil {
@@ -111,6 +101,9 @@ func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) er
break
}
logLine := l.Log
+ if !config.Since.IsZero() && l.Created.Before(config.Since) {
+ continue
+ }
if config.Timestamps {
// format can be "" or time format, so here can't be error
logLine, _ = l.Format(format)
@@ -121,42 +114,50 @@ func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) er
if l.Stream == "stderr" && config.UseStderr {
io.WriteString(errStream, logLine)
}
- l.Reset()
}
}
}
+
if config.Follow && container.IsRunning() {
- errors := make(chan error, 2)
- wg := sync.WaitGroup{}
+ chErr := make(chan error)
+ var stdoutPipe, stderrPipe io.ReadCloser
+
+ // write an empty chunk of data (this is to ensure that the
+ // HTTP Response is sent immediatly, even if the container has
+ // not yet produced any data)
+ outStream.Write(nil)
if config.UseStdout {
- wg.Add(1)
- stdoutPipe := container.StdoutLogPipe()
- defer stdoutPipe.Close()
+ stdoutPipe = container.StdoutLogPipe()
go func() {
- errors <- jsonlog.WriteLog(stdoutPipe, outStream, format)
- wg.Done()
+ logrus.Debug("logs: stdout stream begin")
+ chErr <- jsonlog.WriteLog(stdoutPipe, outStream, format, config.Since)
+ logrus.Debug("logs: stdout stream end")
}()
}
if config.UseStderr {
- wg.Add(1)
- stderrPipe := container.StderrLogPipe()
- defer stderrPipe.Close()
+ stderrPipe = container.StderrLogPipe()
go func() {
- errors <- jsonlog.WriteLog(stderrPipe, errStream, format)
- wg.Done()
+ logrus.Debug("logs: stderr stream begin")
+ chErr <- jsonlog.WriteLog(stderrPipe, errStream, format, config.Since)
+ logrus.Debug("logs: stderr stream end")
}()
}
- wg.Wait()
- close(errors)
+ err = <-chErr
+ if stdoutPipe != nil {
+ stdoutPipe.Close()
+ }
+ if stderrPipe != nil {
+ stderrPipe.Close()
+ }
+ <-chErr // wait for 2nd goroutine to exit, otherwise bad things will happen
- for err := range errors {
- if err != nil {
- logrus.Errorf("%s", err)
+ if err != nil && err != io.EOF && err != io.ErrClosedPipe {
+ if e, ok := err.(*net.OpError); ok && e.Err != syscall.EPIPE {
+ logrus.Errorf("error streaming logs: %v", err)
}
}
-
}
return nil
}
diff --git a/daemon/monitor.go b/daemon/monitor.go
index 293849dd36..dfade8e218 100644
--- a/daemon/monitor.go
+++ b/daemon/monitor.go
@@ -223,10 +223,10 @@ func (m *containerMonitor) shouldRestart(exitCode int) bool {
return false
}
- switch m.restartPolicy.Name {
- case "always":
+ switch {
+ case m.restartPolicy.IsAlways():
return true
- case "on-failure":
+ case m.restartPolicy.IsOnFailure():
// the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count
if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount > max {
logrus.Debugf("stopping restart of container %s because maximum failure could of %d has been reached",
diff --git a/daemon/network/settings.go b/daemon/network/settings.go
index f3841f09b1..ca60ff1980 100644
--- a/daemon/network/settings.go
+++ b/daemon/network/settings.go
@@ -2,17 +2,28 @@ package network
import "github.com/docker/docker/nat"
+type Address struct {
+ Addr string
+ PrefixLen int
+}
+
type Settings struct {
- IPAddress string
- IPPrefixLen int
- MacAddress string
- LinkLocalIPv6Address string
- LinkLocalIPv6PrefixLen int
+ Bridge string
+ EndpointID string
+ Gateway string
GlobalIPv6Address string
GlobalIPv6PrefixLen int
- Gateway string
+ HairpinMode bool
+ IPAddress string
+ IPPrefixLen int
IPv6Gateway string
- Bridge string
+ LinkLocalIPv6Address string
+ LinkLocalIPv6PrefixLen int
+ MacAddress string
+ NetworkID string
PortMapping map[string]map[string]string // Deprecated
Ports nat.PortMap
+ SandboxKey string
+ SecondaryIPAddresses []Address
+ SecondaryIPv6Addresses []Address
}
diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go
deleted file mode 100644
index 2fe04d2065..0000000000
--- a/daemon/networkdriver/bridge/driver.go
+++ /dev/null
@@ -1,784 +0,0 @@
-package bridge
-
-import (
- "encoding/hex"
- "errors"
- "fmt"
- "io/ioutil"
- "net"
- "os"
- "os/exec"
- "strconv"
- "strings"
- "sync"
-
- "github.com/Sirupsen/logrus"
- "github.com/docker/docker/daemon/network"
- "github.com/docker/docker/daemon/networkdriver"
- "github.com/docker/docker/daemon/networkdriver/ipallocator"
- "github.com/docker/docker/daemon/networkdriver/portmapper"
- "github.com/docker/docker/nat"
- "github.com/docker/docker/pkg/iptables"
- "github.com/docker/docker/pkg/parsers/kernel"
- "github.com/docker/docker/pkg/resolvconf"
- "github.com/docker/libcontainer/netlink"
-)
-
-const (
- DefaultNetworkBridge = "docker0"
- MaxAllocatedPortAttempts = 10
-)
-
-// Network interface represents the networking stack of a container
-type networkInterface struct {
- IP net.IP
- IPv6 net.IP
- PortMappings []net.Addr // There are mappings to the host interfaces
-}
-
-type ifaces struct {
- c map[string]*networkInterface
- sync.Mutex
-}
-
-func (i *ifaces) Set(key string, n *networkInterface) {
- i.Lock()
- i.c[key] = n
- i.Unlock()
-}
-
-func (i *ifaces) Get(key string) *networkInterface {
- i.Lock()
- res := i.c[key]
- i.Unlock()
- return res
-}
-
-var (
- addrs = []string{
- // Here we don't follow the convention of using the 1st IP of the range for the gateway.
- // This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges.
- // In theory this shouldn't matter - in practice there's bound to be a few scripts relying
- // on the internal addressing or other things like that.
- // They shouldn't, but hey, let's not break them unless we really have to.
- "172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23
- "10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive
- "10.1.42.1/16",
- "10.42.42.1/16",
- "172.16.42.1/24",
- "172.16.43.1/24",
- "172.16.44.1/24",
- "10.0.42.1/24",
- "10.0.43.1/24",
- "192.168.42.1/24",
- "192.168.43.1/24",
- "192.168.44.1/24",
- }
-
- bridgeIface string
- bridgeIPv4Network *net.IPNet
- gatewayIPv4 net.IP
- bridgeIPv6Addr net.IP
- globalIPv6Network *net.IPNet
- gatewayIPv6 net.IP
- portMapper *portmapper.PortMapper
- once sync.Once
-
- defaultBindingIP = net.ParseIP("0.0.0.0")
- currentInterfaces = ifaces{c: make(map[string]*networkInterface)}
- ipAllocator = ipallocator.New()
-)
-
-func initPortMapper() {
- once.Do(func() {
- portMapper = portmapper.New()
- })
-}
-
-type Config struct {
- EnableIPv6 bool
- EnableIptables bool
- EnableIpForward bool
- EnableIpMasq bool
- DefaultIp net.IP
- Iface string
- IP string
- FixedCIDR string
- FixedCIDRv6 string
- DefaultGatewayIPv4 string
- DefaultGatewayIPv6 string
- InterContainerCommunication bool
-}
-
-func InitDriver(config *Config) error {
- var (
- networkv4 *net.IPNet
- networkv6 *net.IPNet
- addrv4 net.Addr
- addrsv6 []net.Addr
- bridgeIPv6 = "fe80::1/64"
- )
-
- // try to modprobe bridge first
- // see gh#12177
- if out, err := exec.Command("modprobe", "-va", "bridge", "nf_nat").Output(); err != nil {
- logrus.Warnf("Running modprobe bridge nf_nat failed with message: %s, error: %v", out, err)
- }
-
- initPortMapper()
-
- if config.DefaultIp != nil {
- defaultBindingIP = config.DefaultIp
- }
-
- bridgeIface = config.Iface
- usingDefaultBridge := false
- if bridgeIface == "" {
- usingDefaultBridge = true
- bridgeIface = DefaultNetworkBridge
- }
-
- addrv4, addrsv6, err := networkdriver.GetIfaceAddr(bridgeIface)
-
- if err != nil {
- // No Bridge existent, create one
- // If we're not using the default bridge, fail without trying to create it
- if !usingDefaultBridge {
- return err
- }
-
- logrus.Info("Bridge interface not found, trying to create it")
-
- // If the iface is not found, try to create it
- if err := configureBridge(config.IP, bridgeIPv6, config.EnableIPv6); err != nil {
- logrus.Errorf("Could not configure Bridge: %s", err)
- return err
- }
-
- addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
- if err != nil {
- return err
- }
-
- if config.FixedCIDRv6 != "" {
- // Setting route to global IPv6 subnet
- logrus.Infof("Adding route to IPv6 network %q via device %q", config.FixedCIDRv6, bridgeIface)
- if err := netlink.AddRoute(config.FixedCIDRv6, "", "", bridgeIface); err != nil {
- logrus.Fatalf("Could not add route to IPv6 network %q via device %q", config.FixedCIDRv6, bridgeIface)
- }
- }
- } else {
- // Bridge exists already, getting info...
- // Validate that the bridge ip matches the ip specified by BridgeIP
- if config.IP != "" {
- networkv4 = addrv4.(*net.IPNet)
- bip, _, err := net.ParseCIDR(config.IP)
- if err != nil {
- return err
- }
- if !networkv4.IP.Equal(bip) {
- return fmt.Errorf("Bridge ip (%s) does not match existing bridge configuration %s", networkv4.IP, bip)
- }
- }
-
- // A bridge might exist but not have any IPv6 addr associated with it yet
- // (for example, an existing Docker installation that has only been used
- // with IPv4 and docker0 already is set up) In that case, we can perform
- // the bridge init for IPv6 here, else we will error out below if --ipv6=true
- if len(addrsv6) == 0 && config.EnableIPv6 {
- if err := setupIPv6Bridge(bridgeIPv6); err != nil {
- return err
- }
- // Recheck addresses now that IPv6 is setup on the bridge
- addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
- if err != nil {
- return err
- }
- }
-
- // TODO: Check if route to config.FixedCIDRv6 is set
- }
-
- if config.EnableIPv6 {
- bip6, _, err := net.ParseCIDR(bridgeIPv6)
- if err != nil {
- return err
- }
- found := false
- for _, addrv6 := range addrsv6 {
- networkv6 = addrv6.(*net.IPNet)
- if networkv6.IP.Equal(bip6) {
- found = true
- break
- }
- }
- if !found {
- return fmt.Errorf("Bridge IPv6 does not match existing bridge configuration %s", bip6)
- }
- }
-
- networkv4 = addrv4.(*net.IPNet)
-
- if config.EnableIPv6 {
- if len(addrsv6) == 0 {
- return errors.New("IPv6 enabled but no IPv6 detected")
- }
- bridgeIPv6Addr = networkv6.IP
- }
-
- if config.EnableIptables {
- iptables.FirewalldInit()
- }
-
- // Configure iptables for link support
- if config.EnableIptables {
- if err := setupIPTables(addrv4, config.InterContainerCommunication, config.EnableIpMasq); err != nil {
- logrus.Errorf("Error configuring iptables: %s", err)
- return err
- }
- // call this on Firewalld reload
- iptables.OnReloaded(func() { setupIPTables(addrv4, config.InterContainerCommunication, config.EnableIpMasq) })
- }
-
- if config.EnableIpForward {
- // Enable IPv4 forwarding
- if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil {
- logrus.Warnf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
- }
-
- if config.FixedCIDRv6 != "" {
- // Enable IPv6 forwarding
- if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/default/forwarding", []byte{'1', '\n'}, 0644); err != nil {
- logrus.Warnf("WARNING: unable to enable IPv6 default forwarding: %s\n", err)
- }
- if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/all/forwarding", []byte{'1', '\n'}, 0644); err != nil {
- logrus.Warnf("WARNING: unable to enable IPv6 all forwarding: %s\n", err)
- }
- }
- }
-
- // We can always try removing the iptables
- if err := iptables.RemoveExistingChain("DOCKER", iptables.Nat); err != nil {
- return err
- }
-
- if config.EnableIptables {
- _, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Nat)
- if err != nil {
- return err
- }
- // call this on Firewalld reload
- iptables.OnReloaded(func() { iptables.NewChain("DOCKER", bridgeIface, iptables.Nat) })
-
- chain, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter)
- if err != nil {
- return err
- }
- // call this on Firewalld reload
- iptables.OnReloaded(func() { iptables.NewChain("DOCKER", bridgeIface, iptables.Filter) })
-
- portMapper.SetIptablesChain(chain)
- }
-
- bridgeIPv4Network = networkv4
- if config.FixedCIDR != "" {
- _, subnet, err := net.ParseCIDR(config.FixedCIDR)
- if err != nil {
- return err
- }
- logrus.Debugf("Subnet: %v", subnet)
- if err := ipAllocator.RegisterSubnet(bridgeIPv4Network, subnet); err != nil {
- logrus.Errorf("Error registering subnet for IPv4 bridge network: %s", err)
- return err
- }
- }
-
- if gateway, err := requestDefaultGateway(config.DefaultGatewayIPv4, bridgeIPv4Network); err != nil {
- return err
- } else {
- gatewayIPv4 = gateway
- }
-
- if config.FixedCIDRv6 != "" {
- _, subnet, err := net.ParseCIDR(config.FixedCIDRv6)
- if err != nil {
- return err
- }
- logrus.Debugf("Subnet: %v", subnet)
- if err := ipAllocator.RegisterSubnet(subnet, subnet); err != nil {
- logrus.Errorf("Error registering subnet for IPv6 bridge network: %s", err)
- return err
- }
- globalIPv6Network = subnet
-
- if gateway, err := requestDefaultGateway(config.DefaultGatewayIPv6, globalIPv6Network); err != nil {
- return err
- } else {
- gatewayIPv6 = gateway
- }
- }
-
- // Block BridgeIP in IP allocator
- ipAllocator.RequestIP(bridgeIPv4Network, bridgeIPv4Network.IP)
-
- if config.EnableIptables {
- iptables.OnReloaded(portMapper.ReMapAll) // call this on Firewalld reload
- }
-
- return nil
-}
-
-func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
- // Enable NAT
-
- if ipmasq {
- natArgs := []string{"-s", addr.String(), "!", "-o", bridgeIface, "-j", "MASQUERADE"}
-
- if !iptables.Exists(iptables.Nat, "POSTROUTING", natArgs...) {
- if output, err := iptables.Raw(append([]string{
- "-t", string(iptables.Nat), "-I", "POSTROUTING"}, natArgs...)...); err != nil {
- return fmt.Errorf("Unable to enable network bridge NAT: %s", err)
- } else if len(output) != 0 {
- return iptables.ChainError{Chain: "POSTROUTING", Output: output}
- }
- }
- }
-
- var (
- args = []string{"-i", bridgeIface, "-o", bridgeIface, "-j"}
- acceptArgs = append(args, "ACCEPT")
- dropArgs = append(args, "DROP")
- )
-
- if !icc {
- iptables.Raw(append([]string{"-D", "FORWARD"}, acceptArgs...)...)
-
- if !iptables.Exists(iptables.Filter, "FORWARD", dropArgs...) {
- logrus.Debugf("Disable inter-container communication")
- if output, err := iptables.Raw(append([]string{"-A", "FORWARD"}, dropArgs...)...); err != nil {
- return fmt.Errorf("Unable to prevent intercontainer communication: %s", err)
- } else if len(output) != 0 {
- return fmt.Errorf("Error disabling intercontainer communication: %s", output)
- }
- }
- } else {
- iptables.Raw(append([]string{"-D", "FORWARD"}, dropArgs...)...)
-
- if !iptables.Exists(iptables.Filter, "FORWARD", acceptArgs...) {
- logrus.Debugf("Enable inter-container communication")
- if output, err := iptables.Raw(append([]string{"-A", "FORWARD"}, acceptArgs...)...); err != nil {
- return fmt.Errorf("Unable to allow intercontainer communication: %s", err)
- } else if len(output) != 0 {
- return fmt.Errorf("Error enabling intercontainer communication: %s", output)
- }
- }
- }
-
- // Accept all non-intercontainer outgoing packets
- outgoingArgs := []string{"-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"}
- if !iptables.Exists(iptables.Filter, "FORWARD", outgoingArgs...) {
- if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, outgoingArgs...)...); err != nil {
- return fmt.Errorf("Unable to allow outgoing packets: %s", err)
- } else if len(output) != 0 {
- return iptables.ChainError{Chain: "FORWARD outgoing", Output: output}
- }
- }
-
- // Accept incoming packets for existing connections
- existingArgs := []string{"-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"}
-
- if !iptables.Exists(iptables.Filter, "FORWARD", existingArgs...) {
- if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, existingArgs...)...); err != nil {
- return fmt.Errorf("Unable to allow incoming packets: %s", err)
- } else if len(output) != 0 {
- return iptables.ChainError{Chain: "FORWARD incoming", Output: output}
- }
- }
- return nil
-}
-
-func RequestPort(ip net.IP, proto string, port int) (int, error) {
- initPortMapper()
- return portMapper.Allocator.RequestPort(ip, proto, port)
-}
-
-// configureBridge attempts to create and configure a network bridge interface named `bridgeIface` on the host
-// If bridgeIP is empty, it will try to find a non-conflicting IP from the Docker-specified private ranges
-// If the bridge `bridgeIface` already exists, it will only perform the IP address association with the existing
-// bridge (fixes issue #8444)
-// If an address which doesn't conflict with existing interfaces can't be found, an error is returned.
-func configureBridge(bridgeIP string, bridgeIPv6 string, enableIPv6 bool) error {
- nameservers := []string{}
- resolvConf, _ := resolvconf.Get()
- // We don't check for an error here, because we don't really care
- // if we can't read /etc/resolv.conf. So instead we skip the append
- // if resolvConf is nil. It either doesn't exist, or we can't read it
- // for some reason.
- if resolvConf != nil {
- nameservers = append(nameservers, resolvconf.GetNameserversAsCIDR(resolvConf)...)
- }
-
- var ifaceAddr string
- if len(bridgeIP) != 0 {
- _, _, err := net.ParseCIDR(bridgeIP)
- if err != nil {
- return err
- }
- ifaceAddr = bridgeIP
- } else {
- for _, addr := range addrs {
- _, dockerNetwork, err := net.ParseCIDR(addr)
- if err != nil {
- return err
- }
- if err := networkdriver.CheckNameserverOverlaps(nameservers, dockerNetwork); err == nil {
- if err := networkdriver.CheckRouteOverlaps(dockerNetwork); err == nil {
- ifaceAddr = addr
- break
- } else {
- logrus.Debugf("%s %s", addr, err)
- }
- }
- }
- }
-
- if ifaceAddr == "" {
- return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface)
- }
- logrus.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr)
-
- if err := createBridgeIface(bridgeIface); err != nil {
- // The bridge may already exist, therefore we can ignore an "exists" error
- if !os.IsExist(err) {
- return err
- }
- }
-
- iface, err := net.InterfaceByName(bridgeIface)
- if err != nil {
- return err
- }
-
- ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr)
- if err != nil {
- return err
- }
-
- if err := netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil {
- return fmt.Errorf("Unable to add private network: %s", err)
- }
-
- if enableIPv6 {
- if err := setupIPv6Bridge(bridgeIPv6); err != nil {
- return err
- }
- }
-
- if err := netlink.NetworkLinkUp(iface); err != nil {
- return fmt.Errorf("Unable to start network bridge: %s", err)
- }
- return nil
-}
-
-func setupIPv6Bridge(bridgeIPv6 string) error {
-
- iface, err := net.InterfaceByName(bridgeIface)
- if err != nil {
- return err
- }
- // Enable IPv6 on the bridge
- procFile := "/proc/sys/net/ipv6/conf/" + iface.Name + "/disable_ipv6"
- if err := ioutil.WriteFile(procFile, []byte{'0', '\n'}, 0644); err != nil {
- return fmt.Errorf("Unable to enable IPv6 addresses on bridge: %v", err)
- }
-
- ipAddr6, ipNet6, err := net.ParseCIDR(bridgeIPv6)
- if err != nil {
- return fmt.Errorf("Unable to parse bridge IPv6 address: %q, error: %v", bridgeIPv6, err)
- }
-
- if err := netlink.NetworkLinkAddIp(iface, ipAddr6, ipNet6); err != nil {
- return fmt.Errorf("Unable to add private IPv6 network: %v", err)
- }
-
- return nil
-}
-
-func requestDefaultGateway(requestedGateway string, network *net.IPNet) (gateway net.IP, err error) {
- if requestedGateway != "" {
- gateway = net.ParseIP(requestedGateway)
-
- if gateway == nil {
- return nil, fmt.Errorf("Bad parameter: invalid gateway ip %s", requestedGateway)
- }
-
- if !network.Contains(gateway) {
- return nil, fmt.Errorf("Gateway ip %s must be part of the network %s", requestedGateway, network.String())
- }
-
- ipAllocator.RequestIP(network, gateway)
- }
-
- return gateway, nil
-}
-
-func createBridgeIface(name string) error {
- kv, err := kernel.GetKernelVersion()
- // Only set the bridge's mac address if the kernel version is > 3.3
- // before that it was not supported
- setBridgeMacAddr := err == nil && (kv.Kernel >= 3 && kv.Major >= 3)
- logrus.Debugf("setting bridge mac address = %v", setBridgeMacAddr)
- return netlink.CreateBridge(name, setBridgeMacAddr)
-}
-
-// Generate a IEEE802 compliant MAC address from the given IP address.
-//
-// The generator is guaranteed to be consistent: the same IP will always yield the same
-// MAC address. This is to avoid ARP cache issues.
-func generateMacAddr(ip net.IP) net.HardwareAddr {
- hw := make(net.HardwareAddr, 6)
-
- // The first byte of the MAC address has to comply with these rules:
- // 1. Unicast: Set the least-significant bit to 0.
- // 2. Address is locally administered: Set the second-least-significant bit (U/L) to 1.
- // 3. As "small" as possible: The veth address has to be "smaller" than the bridge address.
- hw[0] = 0x02
-
- // The first 24 bits of the MAC represent the Organizationally Unique Identifier (OUI).
- // Since this address is locally administered, we can do whatever we want as long as
- // it doesn't conflict with other addresses.
- hw[1] = 0x42
-
- // Insert the IP address into the last 32 bits of the MAC address.
- // This is a simple way to guarantee the address will be consistent and unique.
- copy(hw[2:], ip.To4())
-
- return hw
-}
-
-func linkLocalIPv6FromMac(mac string) (string, error) {
- hx := strings.Replace(mac, ":", "", -1)
- hw, err := hex.DecodeString(hx)
- if err != nil {
- return "", errors.New("Could not parse MAC address " + mac)
- }
-
- hw[0] ^= 0x2
-
- return fmt.Sprintf("fe80::%x%x:%xff:fe%x:%x%x/64", hw[0], hw[1], hw[2], hw[3], hw[4], hw[5]), nil
-}
-
-// Allocate a network interface
-func Allocate(id, requestedMac, requestedIP, requestedIPv6 string) (*network.Settings, error) {
- var (
- ip net.IP
- mac net.HardwareAddr
- err error
- globalIPv6 net.IP
- defaultGWIPv4 net.IP
- defaultGWIPv6 net.IP
- )
-
- ip, err = ipAllocator.RequestIP(bridgeIPv4Network, net.ParseIP(requestedIP))
- if err != nil {
- return nil, err
- }
-
- // If no explicit mac address was given, generate a random one.
- if mac, err = net.ParseMAC(requestedMac); err != nil {
- mac = generateMacAddr(ip)
- }
-
- if globalIPv6Network != nil {
- // If globalIPv6Network Size is at least a /80 subnet generate IPv6 address from MAC address
- netmaskOnes, _ := globalIPv6Network.Mask.Size()
- ipv6 := net.ParseIP(requestedIPv6)
- if ipv6 == nil && netmaskOnes <= 80 {
- ipv6 = make(net.IP, len(globalIPv6Network.IP))
- copy(ipv6, globalIPv6Network.IP)
- for i, h := range mac {
- ipv6[i+10] = h
- }
- }
-
- globalIPv6, err = ipAllocator.RequestIP(globalIPv6Network, ipv6)
- if err != nil {
- logrus.Errorf("Allocator: RequestIP v6: %v", err)
- return nil, err
- }
- logrus.Infof("Allocated IPv6 %s", globalIPv6)
- }
-
- maskSize, _ := bridgeIPv4Network.Mask.Size()
-
- if gatewayIPv4 != nil {
- defaultGWIPv4 = gatewayIPv4
- } else {
- defaultGWIPv4 = bridgeIPv4Network.IP
- }
-
- if gatewayIPv6 != nil {
- defaultGWIPv6 = gatewayIPv6
- } else {
- defaultGWIPv6 = bridgeIPv6Addr
- }
-
- // If linklocal IPv6
- localIPv6Net, err := linkLocalIPv6FromMac(mac.String())
- if err != nil {
- return nil, err
- }
- localIPv6, _, _ := net.ParseCIDR(localIPv6Net)
-
- networkSettings := &network.Settings{
- IPAddress: ip.String(),
- Gateway: defaultGWIPv4.String(),
- MacAddress: mac.String(),
- Bridge: bridgeIface,
- IPPrefixLen: maskSize,
- LinkLocalIPv6Address: localIPv6.String(),
- }
-
- if globalIPv6Network != nil {
- networkSettings.GlobalIPv6Address = globalIPv6.String()
- maskV6Size, _ := globalIPv6Network.Mask.Size()
- networkSettings.GlobalIPv6PrefixLen = maskV6Size
- networkSettings.IPv6Gateway = defaultGWIPv6.String()
- }
-
- currentInterfaces.Set(id, &networkInterface{
- IP: ip,
- IPv6: globalIPv6,
- })
-
- return networkSettings, nil
-}
-
-// Release an interface for a select ip
-func Release(id string) {
- var containerInterface = currentInterfaces.Get(id)
-
- if containerInterface == nil {
- logrus.Warnf("No network information to release for %s", id)
- return
- }
-
- for _, nat := range containerInterface.PortMappings {
- if err := portMapper.Unmap(nat); err != nil {
- logrus.Infof("Unable to unmap port %s: %s", nat, err)
- }
- }
-
- if err := ipAllocator.ReleaseIP(bridgeIPv4Network, containerInterface.IP); err != nil {
- logrus.Infof("Unable to release IPv4 %s", err)
- }
- if globalIPv6Network != nil {
- if err := ipAllocator.ReleaseIP(globalIPv6Network, containerInterface.IPv6); err != nil {
- logrus.Infof("Unable to release IPv6 %s", err)
- }
- }
-}
-
-// Allocate an external port and map it to the interface
-func AllocatePort(id string, port nat.Port, binding nat.PortBinding) (nat.PortBinding, error) {
- var (
- ip = defaultBindingIP
- proto = port.Proto()
- containerPort = port.Int()
- network = currentInterfaces.Get(id)
- )
-
- if binding.HostIp != "" {
- ip = net.ParseIP(binding.HostIp)
- if ip == nil {
- return nat.PortBinding{}, fmt.Errorf("Bad parameter: invalid host ip %s", binding.HostIp)
- }
- }
-
- // host ip, proto, and host port
- var container net.Addr
- switch proto {
- case "tcp":
- container = &net.TCPAddr{IP: network.IP, Port: containerPort}
- case "udp":
- container = &net.UDPAddr{IP: network.IP, Port: containerPort}
- default:
- return nat.PortBinding{}, fmt.Errorf("unsupported address type %s", proto)
- }
-
- //
- // Try up to 10 times to get a port that's not already allocated.
- //
- // In the event of failure to bind, return the error that portmapper.Map
- // yields.
- //
-
- var (
- host net.Addr
- err error
- )
- hostPort, err := nat.ParsePort(binding.HostPort)
- if err != nil {
- return nat.PortBinding{}, err
- }
- for i := 0; i < MaxAllocatedPortAttempts; i++ {
- if host, err = portMapper.Map(container, ip, hostPort); err == nil {
- break
- }
- // There is no point in immediately retrying to map an explicitly
- // chosen port.
- if hostPort != 0 {
- logrus.Warnf("Failed to allocate and map port %d: %s", hostPort, err)
- break
- }
- logrus.Warnf("Failed to allocate and map port: %s, retry: %d", err, i+1)
- }
-
- if err != nil {
- return nat.PortBinding{}, err
- }
-
- network.PortMappings = append(network.PortMappings, host)
-
- switch netAddr := host.(type) {
- case *net.TCPAddr:
- return nat.PortBinding{HostIp: netAddr.IP.String(), HostPort: strconv.Itoa(netAddr.Port)}, nil
- case *net.UDPAddr:
- return nat.PortBinding{HostIp: netAddr.IP.String(), HostPort: strconv.Itoa(netAddr.Port)}, nil
- default:
- return nat.PortBinding{}, fmt.Errorf("unsupported address type %T", netAddr)
- }
-}
-
-//TODO: should it return something more than just an error?
-func LinkContainers(action, parentIP, childIP string, ports []nat.Port, ignoreErrors bool) error {
- var nfAction iptables.Action
-
- switch action {
- case "-A":
- nfAction = iptables.Append
- case "-I":
- nfAction = iptables.Insert
- case "-D":
- nfAction = iptables.Delete
- default:
- return fmt.Errorf("Invalid action '%s' specified", action)
- }
-
- ip1 := net.ParseIP(parentIP)
- if ip1 == nil {
- return fmt.Errorf("Parent IP '%s' is invalid", parentIP)
- }
- ip2 := net.ParseIP(childIP)
- if ip2 == nil {
- return fmt.Errorf("Child IP '%s' is invalid", childIP)
- }
-
- chain := iptables.Chain{Name: "DOCKER", Bridge: bridgeIface}
- for _, port := range ports {
- if err := chain.Link(nfAction, ip1, ip2, port.Int(), port.Proto()); !ignoreErrors && err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/daemon/networkdriver/bridge/driver_test.go b/daemon/networkdriver/bridge/driver_test.go
deleted file mode 100644
index d18882e664..0000000000
--- a/daemon/networkdriver/bridge/driver_test.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package bridge
-
-import (
- "fmt"
- "net"
- "strconv"
- "testing"
-
- "github.com/docker/docker/daemon/network"
- "github.com/docker/docker/daemon/networkdriver/portmapper"
- "github.com/docker/docker/nat"
- "github.com/docker/docker/pkg/iptables"
-)
-
-func init() {
- // reset the new proxy command for mocking out the userland proxy in tests
- portmapper.NewProxy = portmapper.NewMockProxyCommand
-}
-
-func findFreePort(t *testing.T) string {
- l, err := net.Listen("tcp", ":0")
- if err != nil {
- t.Fatal("Failed to find a free port")
- }
- defer l.Close()
-
- result, err := net.ResolveTCPAddr("tcp", l.Addr().String())
- if err != nil {
- t.Fatal("Failed to resolve address to identify free port")
- }
- return strconv.Itoa(result.Port)
-}
-
-func TestAllocatePortDetection(t *testing.T) {
- freePort := findFreePort(t)
-
- if err := InitDriver(new(Config)); err != nil {
- t.Fatal("Failed to initialize network driver")
- }
-
- // Allocate interface
- if _, err := Allocate("container_id", "", "", ""); err != nil {
- t.Fatal("Failed to allocate network interface")
- }
-
- port := nat.Port(freePort + "/tcp")
- binding := nat.PortBinding{HostIp: "127.0.0.1", HostPort: freePort}
-
- // Allocate same port twice, expect failure on second call
- if _, err := AllocatePort("container_id", port, binding); err != nil {
- t.Fatal("Failed to find a free port to allocate")
- }
- if _, err := AllocatePort("container_id", port, binding); err == nil {
- t.Fatal("Duplicate port allocation granted by AllocatePort")
- }
-}
-
-func TestHostnameFormatChecking(t *testing.T) {
- freePort := findFreePort(t)
-
- if err := InitDriver(new(Config)); err != nil {
- t.Fatal("Failed to initialize network driver")
- }
-
- // Allocate interface
- if _, err := Allocate("container_id", "", "", ""); err != nil {
- t.Fatal("Failed to allocate network interface")
- }
-
- port := nat.Port(freePort + "/tcp")
- binding := nat.PortBinding{HostIp: "localhost", HostPort: freePort}
-
- if _, err := AllocatePort("container_id", port, binding); err == nil {
- t.Fatal("Failed to check invalid HostIP")
- }
-}
-
-func newInterfaceAllocation(t *testing.T, globalIPv6 *net.IPNet, requestedMac, requestedIP, requestedIPv6 string, expectFail bool) *network.Settings {
- // set IPv6 global if given
- if globalIPv6 != nil {
- globalIPv6Network = globalIPv6
- }
-
- networkSettings, err := Allocate("container_id", requestedMac, requestedIP, requestedIPv6)
- if err == nil && expectFail {
- t.Fatal("Doesn't fail to allocate network interface")
- } else if err != nil && !expectFail {
- t.Fatal("Failed to allocate network interface")
-
- }
-
- if globalIPv6 != nil {
- // check for bug #11427
- if globalIPv6Network.IP.String() != globalIPv6.IP.String() {
- t.Fatal("globalIPv6Network was modified during allocation")
- }
- // clean up IPv6 global
- globalIPv6Network = nil
- }
-
- return networkSettings
-}
-
-func TestIPv6InterfaceAllocationAutoNetmaskGt80(t *testing.T) {
- _, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/81")
- networkSettings := newInterfaceAllocation(t, subnet, "", "", "", false)
-
- // ensure low manually assigend global ip
- ip := net.ParseIP(networkSettings.GlobalIPv6Address)
- _, subnet, _ = net.ParseCIDR(fmt.Sprintf("%s/%d", subnet.IP.String(), 120))
- if !subnet.Contains(ip) {
- t.Fatalf("Error ip %s not in subnet %s", ip.String(), subnet.String())
- }
-}
-
-func TestIPv6InterfaceAllocationAutoNetmaskLe80(t *testing.T) {
- _, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/80")
- networkSettings := newInterfaceAllocation(t, subnet, "ab:cd:ab:cd:ab:cd", "", "", false)
-
- // ensure global ip with mac
- ip := net.ParseIP(networkSettings.GlobalIPv6Address)
- expectedIP := net.ParseIP("2001:db8:1234:1234:1234:abcd:abcd:abcd")
- if ip.String() != expectedIP.String() {
- t.Fatalf("Error ip %s should be %s", ip.String(), expectedIP.String())
- }
-
- // ensure link local format
- ip = net.ParseIP(networkSettings.LinkLocalIPv6Address)
- expectedIP = net.ParseIP("fe80::a9cd:abff:fecd:abcd")
- if ip.String() != expectedIP.String() {
- t.Fatalf("Error ip %s should be %s", ip.String(), expectedIP.String())
- }
-
-}
-
-func TestIPv6InterfaceAllocationRequest(t *testing.T) {
- _, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/80")
- expectedIP := "2001:db8:1234:1234:1234::1328"
-
- networkSettings := newInterfaceAllocation(t, subnet, "", "", expectedIP, false)
-
- // ensure global ip with mac
- ip := net.ParseIP(networkSettings.GlobalIPv6Address)
- if ip.String() != expectedIP {
- t.Fatalf("Error ip %s should be %s", ip.String(), expectedIP)
- }
-
- // retry -> fails for duplicated address
- _ = newInterfaceAllocation(t, subnet, "", "", expectedIP, true)
-}
-
-func TestMacAddrGeneration(t *testing.T) {
- ip := net.ParseIP("192.168.0.1")
- mac := generateMacAddr(ip).String()
-
- // Should be consistent.
- if generateMacAddr(ip).String() != mac {
- t.Fatal("Inconsistent MAC address")
- }
-
- // Should be unique.
- ip2 := net.ParseIP("192.168.0.2")
- if generateMacAddr(ip2).String() == mac {
- t.Fatal("Non-unique MAC address")
- }
-}
-
-func TestLinkContainers(t *testing.T) {
- // Init driver
- if err := InitDriver(new(Config)); err != nil {
- t.Fatal("Failed to initialize network driver")
- }
-
- // Allocate interface
- if _, err := Allocate("container_id", "", "", ""); err != nil {
- t.Fatal("Failed to allocate network interface")
- }
-
- bridgeIface = "lo"
- if _, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter); err != nil {
- t.Fatal(err)
- }
-
- if err := LinkContainers("-I", "172.17.0.1", "172.17.0.2", []nat.Port{nat.Port("1234")}, false); err != nil {
- t.Fatal("LinkContainers failed")
- }
-
- // flush rules
- if _, err := iptables.Raw([]string{"-F", "DOCKER"}...); err != nil {
- t.Fatal(err)
- }
-
-}
diff --git a/daemon/networkdriver/network.go b/daemon/networkdriver/network.go
deleted file mode 100644
index 8dda789d2f..0000000000
--- a/daemon/networkdriver/network.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package networkdriver
-
-import (
- "errors"
-)
-
-var (
- ErrNetworkOverlapsWithNameservers = errors.New("requested network overlaps with nameserver")
- ErrNetworkOverlaps = errors.New("requested network overlaps with existing network")
-)
diff --git a/daemon/networkdriver/portmapper/mapper_test.go b/daemon/networkdriver/portmapper/mapper_test.go
deleted file mode 100644
index 729fe56075..0000000000
--- a/daemon/networkdriver/portmapper/mapper_test.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package portmapper
-
-import (
- "net"
- "testing"
-
- "github.com/docker/docker/pkg/iptables"
-)
-
-func init() {
- // override this func to mock out the proxy server
- NewProxy = NewMockProxyCommand
-}
-
-func TestSetIptablesChain(t *testing.T) {
- pm := New()
-
- c := &iptables.Chain{
- Name: "TEST",
- Bridge: "192.168.1.1",
- }
-
- if pm.chain != nil {
- t.Fatal("chain should be nil at init")
- }
-
- pm.SetIptablesChain(c)
- if pm.chain == nil {
- t.Fatal("chain should not be nil after set")
- }
-}
-
-func TestMapPorts(t *testing.T) {
- pm := New()
- dstIp1 := net.ParseIP("192.168.0.1")
- dstIp2 := net.ParseIP("192.168.0.2")
- dstAddr1 := &net.TCPAddr{IP: dstIp1, Port: 80}
- dstAddr2 := &net.TCPAddr{IP: dstIp2, Port: 80}
-
- srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
- srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")}
-
- addrEqual := func(addr1, addr2 net.Addr) bool {
- return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())
- }
-
- if host, err := pm.Map(srcAddr1, dstIp1, 80); err != nil {
- t.Fatalf("Failed to allocate port: %s", err)
- } else if !addrEqual(dstAddr1, host) {
- t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
- dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())
- }
-
- if _, err := pm.Map(srcAddr1, dstIp1, 80); err == nil {
- t.Fatalf("Port is in use - mapping should have failed")
- }
-
- if _, err := pm.Map(srcAddr2, dstIp1, 80); err == nil {
- t.Fatalf("Port is in use - mapping should have failed")
- }
-
- if _, err := pm.Map(srcAddr2, dstIp2, 80); err != nil {
- t.Fatalf("Failed to allocate port: %s", err)
- }
-
- if pm.Unmap(dstAddr1) != nil {
- t.Fatalf("Failed to release port")
- }
-
- if pm.Unmap(dstAddr2) != nil {
- t.Fatalf("Failed to release port")
- }
-
- if pm.Unmap(dstAddr2) == nil {
- t.Fatalf("Port already released, but no error reported")
- }
-}
-
-func TestGetUDPKey(t *testing.T) {
- addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53}
-
- key := getKey(addr)
-
- if expected := "192.168.1.5:53/udp"; key != expected {
- t.Fatalf("expected key %s got %s", expected, key)
- }
-}
-
-func TestGetTCPKey(t *testing.T) {
- addr := &net.TCPAddr{IP: net.ParseIP("192.168.1.5"), Port: 80}
-
- key := getKey(addr)
-
- if expected := "192.168.1.5:80/tcp"; key != expected {
- t.Fatalf("expected key %s got %s", expected, key)
- }
-}
-
-func TestGetUDPIPAndPort(t *testing.T) {
- addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53}
-
- ip, port := getIPAndPort(addr)
- if expected := "192.168.1.5"; ip.String() != expected {
- t.Fatalf("expected ip %s got %s", expected, ip)
- }
-
- if ep := 53; port != ep {
- t.Fatalf("expected port %d got %d", ep, port)
- }
-}
-
-func TestMapAllPortsSingleInterface(t *testing.T) {
- pm := New()
- dstIp1 := net.ParseIP("0.0.0.0")
- srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
-
- hosts := []net.Addr{}
- var host net.Addr
- var err error
-
- defer func() {
- for _, val := range hosts {
- pm.Unmap(val)
- }
- }()
-
- for i := 0; i < 10; i++ {
- start, end := pm.Allocator.Begin, pm.Allocator.End
- for i := start; i < end; i++ {
- if host, err = pm.Map(srcAddr1, dstIp1, 0); err != nil {
- t.Fatal(err)
- }
-
- hosts = append(hosts, host)
- }
-
- if _, err := pm.Map(srcAddr1, dstIp1, start); err == nil {
- t.Fatalf("Port %d should be bound but is not", start)
- }
-
- for _, val := range hosts {
- if err := pm.Unmap(val); err != nil {
- t.Fatal(err)
- }
- }
-
- hosts = []net.Addr{}
- }
-}
diff --git a/daemon/networkdriver/utils.go b/daemon/networkdriver/utils.go
deleted file mode 100644
index 9f0c88cd5e..0000000000
--- a/daemon/networkdriver/utils.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package networkdriver
-
-import (
- "errors"
- "fmt"
- "net"
-
- "github.com/docker/libcontainer/netlink"
-)
-
-var (
- networkGetRoutesFct = netlink.NetworkGetRoutes
- ErrNoDefaultRoute = errors.New("no default route")
-)
-
-func CheckNameserverOverlaps(nameservers []string, toCheck *net.IPNet) error {
- if len(nameservers) > 0 {
- for _, ns := range nameservers {
- _, nsNetwork, err := net.ParseCIDR(ns)
- if err != nil {
- return err
- }
- if NetworkOverlaps(toCheck, nsNetwork) {
- return ErrNetworkOverlapsWithNameservers
- }
- }
- }
- return nil
-}
-
-func CheckRouteOverlaps(toCheck *net.IPNet) error {
- networks, err := networkGetRoutesFct()
- if err != nil {
- return err
- }
-
- for _, network := range networks {
- if network.IPNet != nil && NetworkOverlaps(toCheck, network.IPNet) {
- return ErrNetworkOverlaps
- }
- }
- return nil
-}
-
-// Detects overlap between one IPNet and another
-func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool {
- if len(netX.IP) == len(netY.IP) {
- if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) {
- return true
- }
- if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) {
- return true
- }
- }
- return false
-}
-
-// Calculates the first and last IP addresses in an IPNet
-func NetworkRange(network *net.IPNet) (net.IP, net.IP) {
- var netIP net.IP
- if network.IP.To4() != nil {
- netIP = network.IP.To4()
- } else if network.IP.To16() != nil {
- netIP = network.IP.To16()
- } else {
- return nil, nil
- }
-
- lastIP := make([]byte, len(netIP), len(netIP))
-
- for i := 0; i < len(netIP); i++ {
- lastIP[i] = netIP[i] | ^network.Mask[i]
- }
- return netIP.Mask(network.Mask), net.IP(lastIP)
-}
-
-// Return the first IPv4 address and slice of IPv6 addresses for the specified network interface
-func GetIfaceAddr(name string) (net.Addr, []net.Addr, error) {
- iface, err := net.InterfaceByName(name)
- if err != nil {
- return nil, nil, err
- }
- addrs, err := iface.Addrs()
- if err != nil {
- return nil, nil, err
- }
- var addrs4 []net.Addr
- var addrs6 []net.Addr
- for _, addr := range addrs {
- ip := (addr.(*net.IPNet)).IP
- if ip4 := ip.To4(); ip4 != nil {
- addrs4 = append(addrs4, addr)
- } else if ip6 := ip.To16(); len(ip6) == net.IPv6len {
- addrs6 = append(addrs6, addr)
- }
- }
- switch {
- case len(addrs4) == 0:
- return nil, nil, fmt.Errorf("Interface %v has no IPv4 addresses", name)
- case len(addrs4) > 1:
- fmt.Printf("Interface %v has more than 1 IPv4 address. Defaulting to using %v\n",
- name, (addrs4[0].(*net.IPNet)).IP)
- }
- return addrs4[0], addrs6, nil
-}
-
-func GetDefaultRouteIface() (*net.Interface, error) {
- rs, err := networkGetRoutesFct()
- if err != nil {
- return nil, fmt.Errorf("unable to get routes: %v", err)
- }
- for _, r := range rs {
- if r.Default {
- return r.Iface, nil
- }
- }
- return nil, ErrNoDefaultRoute
-}
diff --git a/daemon/pause.go b/daemon/pause.go
new file mode 100644
index 0000000000..348f83fc72
--- /dev/null
+++ b/daemon/pause.go
@@ -0,0 +1,18 @@
+package daemon
+
+import "fmt"
+
+// ContainerPause pauses a container
+func (daemon *Daemon) ContainerPause(name string) error {
+ container, err := daemon.Get(name)
+ if err != nil {
+ return err
+ }
+
+ if err := container.Pause(); err != nil {
+ return fmt.Errorf("Cannot pause container %s: %s", name, err)
+ }
+ container.LogEvent("pause")
+
+ return nil
+}
diff --git a/daemon/resize.go b/daemon/resize.go
index 060634b13b..f22539466e 100644
--- a/daemon/resize.go
+++ b/daemon/resize.go
@@ -1,12 +1,19 @@
package daemon
+func (daemon *Daemon) ContainerResize(name string, height, width int) error {
+ container, err := daemon.Get(name)
+ if err != nil {
+ return err
+ }
+
+ return container.Resize(height, width)
+}
+
func (daemon *Daemon) ContainerExecResize(name string, height, width int) error {
execConfig, err := daemon.getExecConfig(name)
if err != nil {
return err
}
- if err := execConfig.Resize(height, width); err != nil {
- return err
- }
- return nil
+
+ return execConfig.Resize(height, width)
}
diff --git a/daemon/stats.go b/daemon/stats.go
index a95168d128..c7da913223 100644
--- a/daemon/stats.go
+++ b/daemon/stats.go
@@ -10,7 +10,7 @@ import (
"github.com/docker/libcontainer/cgroups"
)
-func (daemon *Daemon) ContainerStats(name string, out io.Writer) error {
+func (daemon *Daemon) ContainerStats(name string, stream bool, out io.Writer) error {
updates, err := daemon.SubscribeToContainerStats(name)
if err != nil {
return err
@@ -27,6 +27,9 @@ func (daemon *Daemon) ContainerStats(name string, out io.Writer) error {
daemon.UnsubscribeToContainerStats(name, updates)
return err
}
+ if !stream {
+ break
+ }
}
return nil
}
diff --git a/daemon/stats_collector.go b/daemon/stats_collector.go
index 22239743a6..98b44c3de2 100644
--- a/daemon/stats_collector.go
+++ b/daemon/stats_collector.go
@@ -24,6 +24,7 @@ func newStatsCollector(interval time.Duration) *statsCollector {
interval: interval,
publishers: make(map[*Container]*pubsub.Publisher),
clockTicks: uint64(system.GetClockTicks()),
+ bufReader: bufio.NewReaderSize(nil, 128),
}
go s.run()
return s
@@ -35,6 +36,7 @@ type statsCollector struct {
interval time.Duration
clockTicks uint64
publishers map[*Container]*pubsub.Publisher
+ bufReader *bufio.Reader
}
// collect registers the container with the collector and adds it to
@@ -121,14 +123,23 @@ const nanoSeconds = 1e9
// getSystemCpuUSage returns the host system's cpu usage in nanoseconds
// for the system to match the cgroup readings are returned in the same format.
func (s *statsCollector) getSystemCpuUsage() (uint64, error) {
+ var line string
f, err := os.Open("/proc/stat")
if err != nil {
return 0, err
}
- defer f.Close()
- sc := bufio.NewScanner(f)
- for sc.Scan() {
- parts := strings.Fields(sc.Text())
+ defer func() {
+ s.bufReader.Reset(nil)
+ f.Close()
+ }()
+ s.bufReader.Reset(f)
+ err = nil
+ for err == nil {
+ line, err = s.bufReader.ReadString('\n')
+ if err != nil {
+ break
+ }
+ parts := strings.Fields(line)
switch parts[0] {
case "cpu":
if len(parts) < 8 {
diff --git a/daemon/unpause.go b/daemon/unpause.go
new file mode 100644
index 0000000000..b13c85e19d
--- /dev/null
+++ b/daemon/unpause.go
@@ -0,0 +1,18 @@
+package daemon
+
+import "fmt"
+
+// ContainerUnpause unpauses a container
+func (daemon *Daemon) ContainerUnpause(name string) error {
+ container, err := daemon.Get(name)
+ if err != nil {
+ return err
+ }
+
+ if err := container.Unpause(); err != nil {
+ return fmt.Errorf("Cannot unpause container %s: %s", name, err)
+ }
+ container.LogEvent("unpause")
+
+ return nil
+}
diff --git a/daemon/volumes.go b/daemon/volumes.go
index ea117a1e3f..2fdcd8311d 100644
--- a/daemon/volumes.go
+++ b/daemon/volumes.go
@@ -1,225 +1,103 @@
package daemon
import (
+ "encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
- "sort"
"strings"
- "github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/chrootarchive"
- "github.com/docker/docker/pkg/mount"
- "github.com/docker/docker/pkg/symlink"
+ "github.com/docker/docker/runconfig"
+ "github.com/docker/docker/volume"
)
-type volumeMount struct {
- containerPath string
- hostPath string
- writable bool
- copyData bool
- from string
+type mountPoint struct {
+ Name string
+ Destination string
+ Driver string
+ RW bool
+ Volume volume.Volume `json:"-"`
+ Source string
}
-func (container *Container) prepareVolumes() error {
- if container.Volumes == nil || len(container.Volumes) == 0 {
- container.Volumes = make(map[string]string)
- container.VolumesRW = make(map[string]bool)
+func (m *mountPoint) Setup() (string, error) {
+ if m.Volume != nil {
+ return m.Volume.Mount()
}
- if len(container.hostConfig.VolumesFrom) > 0 && container.AppliedVolumesFrom == nil {
- container.AppliedVolumesFrom = make(map[string]struct{})
- }
- return container.createVolumes()
-}
-
-func (container *Container) createVolumes() error {
- mounts := make(map[string]*volumeMount)
-
- // get the normal volumes
- for path := range container.Config.Volumes {
- path = filepath.Clean(path)
- // skip if there is already a volume for this container path
- if _, exists := container.Volumes[path]; exists {
- continue
- }
-
- realPath, err := container.GetResourcePath(path)
- if err != nil {
- return err
- }
- if stat, err := os.Stat(realPath); err == nil {
- if !stat.IsDir() {
- return fmt.Errorf("can't mount to container path, file exists - %s", path)
+ if len(m.Source) > 0 {
+ if _, err := os.Stat(m.Source); err != nil {
+ if !os.IsNotExist(err) {
+ return "", err
+ }
+ if err := os.MkdirAll(m.Source, 0755); err != nil {
+ return "", err
}
}
-
- mnt := &volumeMount{
- containerPath: path,
- writable: true,
- copyData: true,
- }
- mounts[mnt.containerPath] = mnt
+ return m.Source, nil
}
- // Get all the bind mounts
- // track bind paths separately due to #10618
- bindPaths := make(map[string]struct{})
- for _, spec := range container.hostConfig.Binds {
- mnt, err := parseBindMountSpec(spec)
- if err != nil {
- return err
- }
-
- // #10618
- if _, exists := bindPaths[mnt.containerPath]; exists {
- return fmt.Errorf("Duplicate volume mount %s", mnt.containerPath)
- }
-
- bindPaths[mnt.containerPath] = struct{}{}
- mounts[mnt.containerPath] = mnt
- }
-
- // Get volumes from
- for _, from := range container.hostConfig.VolumesFrom {
- cID, mode, err := parseVolumesFromSpec(from)
- if err != nil {
- return err
- }
- if _, exists := container.AppliedVolumesFrom[cID]; exists {
- // skip since it's already been applied
- continue
- }
-
- c, err := container.daemon.Get(cID)
- if err != nil {
- return fmt.Errorf("container %s not found, impossible to mount its volumes", cID)
- }
-
- for _, mnt := range c.volumeMounts() {
- mnt.writable = mnt.writable && (mode == "rw")
- mnt.from = cID
- mounts[mnt.containerPath] = mnt
- }
- }
-
- for _, mnt := range mounts {
- containerMntPath, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, mnt.containerPath), container.basefs)
- if err != nil {
- return err
- }
-
- // Create the actual volume
- v, err := container.daemon.volumes.FindOrCreateVolume(mnt.hostPath, mnt.writable)
- if err != nil {
- return err
- }
-
- container.VolumesRW[mnt.containerPath] = mnt.writable
- container.Volumes[mnt.containerPath] = v.Path
- v.AddContainer(container.ID)
- if mnt.from != "" {
- container.AppliedVolumesFrom[mnt.from] = struct{}{}
- }
-
- if mnt.writable && mnt.copyData {
- // Copy whatever is in the container at the containerPath to the volume
- copyExistingContents(containerMntPath, v.Path)
- }
- }
-
- return nil
+ return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined")
}
-// sortedVolumeMounts returns the list of container volume mount points sorted in lexicographic order
-func (container *Container) sortedVolumeMounts() []string {
- var mountPaths []string
- for path := range container.Volumes {
- mountPaths = append(mountPaths, path)
+func (m *mountPoint) Path() string {
+ if m.Volume != nil {
+ return m.Volume.Path()
}
- sort.Strings(mountPaths)
- return mountPaths
+ return m.Source
}
-func (container *Container) VolumePaths() map[string]struct{} {
- var paths = make(map[string]struct{})
- for _, path := range container.Volumes {
- paths[path] = struct{}{}
+func parseBindMount(spec string, config *runconfig.Config) (*mountPoint, error) {
+ bind := &mountPoint{
+ RW: true,
}
- return paths
-}
-
-func (container *Container) registerVolumes() {
- for path := range container.VolumePaths() {
- if v := container.daemon.volumes.Get(path); v != nil {
- v.AddContainer(container.ID)
- continue
- }
-
- // if container was created with an old daemon, this volume may not be registered so we need to make sure it gets registered
- writable := true
- if rw, exists := container.VolumesRW[path]; exists {
- writable = rw
- }
- v, err := container.daemon.volumes.FindOrCreateVolume(path, writable)
- if err != nil {
- logrus.Debugf("error registering volume %s: %v", path, err)
- continue
- }
- v.AddContainer(container.ID)
- }
-}
-
-func (container *Container) derefVolumes() {
- for path := range container.VolumePaths() {
- vol := container.daemon.volumes.Get(path)
- if vol == nil {
- logrus.Debugf("Volume %s was not found and could not be dereferenced", path)
- continue
- }
- vol.RemoveContainer(container.ID)
- }
-}
-
-func parseBindMountSpec(spec string) (*volumeMount, error) {
arr := strings.Split(spec, ":")
- mnt := &volumeMount{}
switch len(arr) {
case 2:
- mnt.hostPath = arr[0]
- mnt.containerPath = arr[1]
- mnt.writable = true
+ bind.Destination = arr[1]
case 3:
- mnt.hostPath = arr[0]
- mnt.containerPath = arr[1]
- mnt.writable = validMountMode(arr[2]) && arr[2] == "rw"
+ bind.Destination = arr[1]
+ if !validMountMode(arr[2]) {
+ return nil, fmt.Errorf("invalid mode for volumes-from: %s", arr[2])
+ }
+ bind.RW = arr[2] == "rw"
default:
return nil, fmt.Errorf("Invalid volume specification: %s", spec)
}
- if !filepath.IsAbs(mnt.hostPath) {
- return nil, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", mnt.hostPath)
+ name, source, err := parseVolumeSource(arr[0], config)
+ if err != nil {
+ return nil, err
}
- mnt.hostPath = filepath.Clean(mnt.hostPath)
- mnt.containerPath = filepath.Clean(mnt.containerPath)
- return mnt, nil
+ if len(source) == 0 {
+ bind.Driver = config.VolumeDriver
+ if len(bind.Driver) == 0 {
+ bind.Driver = volume.DefaultDriverName
+ }
+ } else {
+ bind.Source = filepath.Clean(source)
+ }
+
+ bind.Name = name
+ bind.Destination = filepath.Clean(bind.Destination)
+ return bind, nil
}
-func parseVolumesFromSpec(spec string) (string, string, error) {
- specParts := strings.SplitN(spec, ":", 2)
- if len(specParts) == 0 {
+func parseVolumesFrom(spec string) (string, string, error) {
+ if len(spec) == 0 {
return "", "", fmt.Errorf("malformed volumes-from specification: %s", spec)
}
- var (
- id = specParts[0]
- mode = "rw"
- )
+ specParts := strings.SplitN(spec, ":", 2)
+ id := specParts[0]
+ mode := "rw"
+
if len(specParts) == 2 {
mode = specParts[1]
if !validMountMode(mode) {
@@ -234,74 +112,33 @@ func validMountMode(mode string) bool {
"rw": true,
"ro": true,
}
-
return validModes[mode]
}
func (container *Container) specialMounts() []execdriver.Mount {
var mounts []execdriver.Mount
if container.ResolvConfPath != "" {
- mounts = append(mounts, execdriver.Mount{Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: true, Private: true})
+ mounts = append(mounts, execdriver.Mount{Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: !container.hostConfig.ReadonlyRootfs, Private: true})
}
if container.HostnamePath != "" {
- mounts = append(mounts, execdriver.Mount{Source: container.HostnamePath, Destination: "/etc/hostname", Writable: true, Private: true})
+ mounts = append(mounts, execdriver.Mount{Source: container.HostnamePath, Destination: "/etc/hostname", Writable: !container.hostConfig.ReadonlyRootfs, Private: true})
}
if container.HostsPath != "" {
- mounts = append(mounts, execdriver.Mount{Source: container.HostsPath, Destination: "/etc/hosts", Writable: true, Private: true})
+ mounts = append(mounts, execdriver.Mount{Source: container.HostsPath, Destination: "/etc/hosts", Writable: !container.hostConfig.ReadonlyRootfs, Private: true})
}
return mounts
}
-func (container *Container) setupMounts() error {
- mounts := []execdriver.Mount{}
-
- // Mount user specified volumes
- // Note, these are not private because you may want propagation of (un)mounts from host
- // volumes. For instance if you use -v /usr:/usr and the host later mounts /usr/share you
- // want this new mount in the container
- // These mounts must be ordered based on the length of the path that it is being mounted to (lexicographic)
- for _, path := range container.sortedVolumeMounts() {
- mounts = append(mounts, execdriver.Mount{
- Source: container.Volumes[path],
- Destination: path,
- Writable: container.VolumesRW[path],
- })
- }
-
- mounts = append(mounts, container.specialMounts()...)
-
- container.command.Mounts = mounts
- return nil
-}
-
-func (container *Container) volumeMounts() map[string]*volumeMount {
- mounts := make(map[string]*volumeMount)
-
- for containerPath, path := range container.Volumes {
- v := container.daemon.volumes.Get(path)
- if v == nil {
- // This should never happen
- logrus.Debugf("reference by container %s to non-existent volume path %s", container.ID, path)
- continue
- }
- mounts[containerPath] = &volumeMount{hostPath: path, containerPath: containerPath, writable: container.VolumesRW[containerPath]}
- }
-
- return mounts
-}
-
func copyExistingContents(source, destination string) error {
volList, err := ioutil.ReadDir(source)
if err != nil {
return err
}
-
if len(volList) > 0 {
srcList, err := ioutil.ReadDir(destination)
if err != nil {
return err
}
-
if len(srcList) == 0 {
// If the source volume is empty copy files from the root into the volume
if err := chrootarchive.CopyWithTar(source, destination); err != nil {
@@ -309,60 +146,136 @@ func copyExistingContents(source, destination string) error {
}
}
}
-
return copyOwnership(source, destination)
}
-func (container *Container) mountVolumes() error {
- for dest, source := range container.Volumes {
- v := container.daemon.volumes.Get(source)
- if v == nil {
- return fmt.Errorf("could not find volume for %s:%s, impossible to mount", source, dest)
- }
+// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
+// It follows the next sequence to decide what to mount in each final destination:
+//
+// 1. Select the previously configured mount points for the containers, if any.
+// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
+// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
+func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
+ binds := map[string]bool{}
+ mountPoints := map[string]*mountPoint{}
- destPath, err := container.GetResourcePath(dest)
+ // 1. Read already configured mount points.
+ for name, point := range container.MountPoints {
+ mountPoints[name] = point
+ }
+
+ // 2. Read volumes from other containers.
+ for _, v := range hostConfig.VolumesFrom {
+ containerID, mode, err := parseVolumesFrom(v)
if err != nil {
return err
}
- if err := mount.Mount(source, destPath, "bind", "rbind,rw"); err != nil {
- return fmt.Errorf("error while mounting volume %s: %v", source, err)
- }
- }
-
- for _, mnt := range container.specialMounts() {
- destPath, err := container.GetResourcePath(mnt.Destination)
+ c, err := daemon.Get(containerID)
if err != nil {
return err
}
- if err := mount.Mount(mnt.Source, destPath, "bind", "bind,rw"); err != nil {
- return fmt.Errorf("error while mounting volume %s: %v", mnt.Source, err)
+
+ for _, m := range c.MountPoints {
+ cp := m
+ cp.RW = m.RW && mode != "ro"
+
+ if len(m.Source) == 0 {
+ v, err := createVolume(m.Name, m.Driver)
+ if err != nil {
+ return err
+ }
+ cp.Volume = v
+ }
+
+ mountPoints[cp.Destination] = cp
}
}
+
+ // 3. Read bind mounts
+ for _, b := range hostConfig.Binds {
+ // #10618
+ bind, err := parseBindMount(b, container.Config)
+ if err != nil {
+ return err
+ }
+
+ if binds[bind.Destination] {
+ return fmt.Errorf("Duplicate bind mount %s", bind.Destination)
+ }
+
+ if len(bind.Name) > 0 && len(bind.Driver) > 0 {
+ v, err := createVolume(bind.Name, bind.Driver)
+ if err != nil {
+ return err
+ }
+ bind.Volume = v
+ }
+
+ binds[bind.Destination] = true
+ mountPoints[bind.Destination] = bind
+ }
+
+ container.MountPoints = mountPoints
+
return nil
}
-func (container *Container) unmountVolumes() {
- for dest := range container.Volumes {
- destPath, err := container.GetResourcePath(dest)
- if err != nil {
- logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
- continue
+// verifyOldVolumesInfo ports volumes configured for the containers pre docker 1.7.
+// It reads the container configuration and creates valid mount points for the old volumes.
+func (daemon *Daemon) verifyOldVolumesInfo(container *Container) error {
+ jsonPath, err := container.jsonPath()
+ if err != nil {
+ return err
+ }
+ f, err := os.Open(jsonPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
}
- if err := mount.ForceUnmount(destPath); err != nil {
- logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
- continue
+ return err
+ }
+
+ type oldContVolCfg struct {
+ Volumes map[string]string
+ VolumesRW map[string]bool
+ }
+
+ vols := oldContVolCfg{
+ Volumes: make(map[string]string),
+ VolumesRW: make(map[string]bool),
+ }
+ if err := json.NewDecoder(f).Decode(&vols); err != nil {
+ return err
+ }
+
+ for destination, hostPath := range vols.Volumes {
+ vfsPath := filepath.Join(daemon.root, "vfs", "dir")
+
+ if strings.HasPrefix(hostPath, vfsPath) {
+ id := filepath.Base(hostPath)
+
+ rw := vols.VolumesRW != nil && vols.VolumesRW[destination]
+ container.addLocalMountPoint(id, destination, rw)
}
}
- for _, mnt := range container.specialMounts() {
- destPath, err := container.GetResourcePath(mnt.Destination)
- if err != nil {
- logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
- continue
- }
- if err := mount.ForceUnmount(destPath); err != nil {
- logrus.Errorf("error while unmounting volumes %s: %v", destPath, err)
- }
- }
+ return container.ToDisk()
+}
+
+func createVolume(name, driverName string) (volume.Volume, error) {
+ vd, err := getVolumeDriver(driverName)
+
+ if err != nil {
+ return nil, err
+ }
+ return vd.Create(name)
+}
+
+func removeVolume(v volume.Volume) error {
+ vd, err := getVolumeDriver(v.DriverName())
+ if err != nil {
+ return nil
+ }
+ return vd.Remove(v)
}
diff --git a/daemon/volumes_experimental.go b/daemon/volumes_experimental.go
new file mode 100644
index 0000000000..c39b7907b0
--- /dev/null
+++ b/daemon/volumes_experimental.go
@@ -0,0 +1,26 @@
+// +build experimental
+
+package daemon
+
+import (
+ "path/filepath"
+
+ "github.com/docker/docker/runconfig"
+ "github.com/docker/docker/volume"
+ "github.com/docker/docker/volume/drivers"
+)
+
+func getVolumeDriver(name string) (volume.Driver, error) {
+ if name == "" {
+ name = volume.DefaultDriverName
+ }
+ return volumedrivers.Lookup(name)
+}
+
+func parseVolumeSource(spec string, config *runconfig.Config) (string, string, error) {
+ if !filepath.IsAbs(spec) {
+ return spec, "", nil
+ }
+
+ return "", spec, nil
+}
diff --git a/daemon/volumes_experimental_unit_test.go b/daemon/volumes_experimental_unit_test.go
new file mode 100644
index 0000000000..1201f5154e
--- /dev/null
+++ b/daemon/volumes_experimental_unit_test.go
@@ -0,0 +1,86 @@
+// +build experimental
+
+package daemon
+
+import (
+ "testing"
+
+ "github.com/docker/docker/runconfig"
+ "github.com/docker/docker/volume"
+ "github.com/docker/docker/volume/drivers"
+)
+
+type fakeDriver struct{}
+
+func (fakeDriver) Name() string { return "fake" }
+func (fakeDriver) Create(name string) (volume.Volume, error) { return nil, nil }
+func (fakeDriver) Remove(v volume.Volume) error { return nil }
+
+func TestGetVolumeDriver(t *testing.T) {
+ _, err := getVolumeDriver("missing")
+ if err == nil {
+ t.Fatal("Expected error, was nil")
+ }
+
+ volumedrivers.Register(fakeDriver{}, "fake")
+ d, err := getVolumeDriver("fake")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if d.Name() != "fake" {
+ t.Fatalf("Expected fake driver, got %s\n", d.Name())
+ }
+}
+
+func TestParseBindMount(t *testing.T) {
+ cases := []struct {
+ bind string
+ driver string
+ expDest string
+ expSource string
+ expName string
+ expDriver string
+ expRW bool
+ fail bool
+ }{
+ {"/tmp:/tmp", "", "/tmp", "/tmp", "", "", true, false},
+ {"/tmp:/tmp:ro", "", "/tmp", "/tmp", "", "", false, false},
+ {"/tmp:/tmp:rw", "", "/tmp", "/tmp", "", "", true, false},
+ {"/tmp:/tmp:foo", "", "/tmp", "/tmp", "", "", false, true},
+ {"name:/tmp", "", "/tmp", "", "name", "local", true, false},
+ {"name:/tmp", "external", "/tmp", "", "name", "external", true, false},
+ {"name:/tmp:ro", "local", "/tmp", "", "name", "local", false, false},
+ {"local/name:/tmp:rw", "", "/tmp", "", "local/name", "local", true, false},
+ }
+
+ for _, c := range cases {
+ conf := &runconfig.Config{VolumeDriver: c.driver}
+ m, err := parseBindMount(c.bind, conf)
+ if c.fail {
+ if err == nil {
+ t.Fatalf("Expected error, was nil, for spec %s\n", c.bind)
+ }
+ continue
+ }
+
+ if m.Destination != c.expDest {
+ t.Fatalf("Expected destination %s, was %s, for spec %s\n", c.expDest, m.Destination, c.bind)
+ }
+
+ if m.Source != c.expSource {
+ t.Fatalf("Expected source %s, was %s, for spec %s\n", c.expSource, m.Source, c.bind)
+ }
+
+ if m.Name != c.expName {
+ t.Fatalf("Expected name %s, was %s for spec %s\n", c.expName, m.Name, c.bind)
+ }
+
+ if m.Driver != c.expDriver {
+ t.Fatalf("Expected driver %s, was %s, for spec %s\n", c.expDriver, m.Driver, c.bind)
+ }
+
+ if m.RW != c.expRW {
+ t.Fatalf("Expected RW %v, was %v for spec %s\n", c.expRW, m.RW, c.bind)
+ }
+ }
+}
diff --git a/daemon/volumes_linux.go b/daemon/volumes_linux.go
index 93fea81659..8eea5e067f 100644
--- a/daemon/volumes_linux.go
+++ b/daemon/volumes_linux.go
@@ -4,7 +4,11 @@ package daemon
import (
"os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/pkg/system"
)
@@ -22,3 +26,45 @@ func copyOwnership(source, destination string) error {
return os.Chmod(destination, os.FileMode(stat.Mode()))
}
+
+func (container *Container) setupMounts() ([]execdriver.Mount, error) {
+ var mounts []execdriver.Mount
+ for _, m := range container.MountPoints {
+ path, err := m.Setup()
+ if err != nil {
+ return nil, err
+ }
+
+ mounts = append(mounts, execdriver.Mount{
+ Source: path,
+ Destination: m.Destination,
+ Writable: m.RW,
+ })
+ }
+
+ mounts = sortMounts(mounts)
+ return append(mounts, container.networkMounts()...), nil
+}
+
+func sortMounts(m []execdriver.Mount) []execdriver.Mount {
+ sort.Sort(mounts(m))
+ return m
+}
+
+type mounts []execdriver.Mount
+
+func (m mounts) Len() int {
+ return len(m)
+}
+
+func (m mounts) Less(i, j int) bool {
+ return m.parts(i) < m.parts(j)
+}
+
+func (m mounts) Swap(i, j int) {
+ m[i], m[j] = m[j], m[i]
+}
+
+func (m mounts) parts(i int) int {
+ return len(strings.Split(filepath.Clean(m[i].Destination), string(os.PathSeparator)))
+}
diff --git a/daemon/volumes_stubs.go b/daemon/volumes_stubs.go
new file mode 100644
index 0000000000..1d2d873d8e
--- /dev/null
+++ b/daemon/volumes_stubs.go
@@ -0,0 +1,24 @@
+// +build !experimental
+
+package daemon
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/docker/docker/runconfig"
+ "github.com/docker/docker/volume"
+ "github.com/docker/docker/volume/drivers"
+)
+
+func getVolumeDriver(_ string) (volume.Driver, error) {
+ return volumedrivers.Lookup(volume.DefaultDriverName)
+}
+
+func parseVolumeSource(spec string, _ *runconfig.Config) (string, string, error) {
+ if !filepath.IsAbs(spec) {
+ return "", "", fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", spec)
+ }
+
+ return "", spec, nil
+}
diff --git a/daemon/volumes_stubs_unit_test.go b/daemon/volumes_stubs_unit_test.go
new file mode 100644
index 0000000000..a3cafe6550
--- /dev/null
+++ b/daemon/volumes_stubs_unit_test.go
@@ -0,0 +1,81 @@
+// +build !experimental
+
+package daemon
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/docker/docker/runconfig"
+ "github.com/docker/docker/volume"
+ "github.com/docker/docker/volume/drivers"
+ "github.com/docker/docker/volume/local"
+)
+
+func TestGetVolumeDefaultDriver(t *testing.T) {
+ tmp, err := ioutil.TempDir("", "volume-test-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+
+ l, err := local.New(tmp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ volumedrivers.Register(l, volume.DefaultDriverName)
+ d, err := getVolumeDriver("missing")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if d.Name() != volume.DefaultDriverName {
+ t.Fatalf("Expected local driver, was %s\n", d.Name)
+ }
+}
+
+func TestParseBindMount(t *testing.T) {
+ cases := []struct {
+ bind string
+ expDest string
+ expSource string
+ expName string
+ expRW bool
+ fail bool
+ }{
+ {"/tmp:/tmp", "/tmp", "/tmp", "", true, false},
+ {"/tmp:/tmp:ro", "/tmp", "/tmp", "", false, false},
+ {"/tmp:/tmp:rw", "/tmp", "/tmp", "", true, false},
+ {"/tmp:/tmp:foo", "/tmp", "/tmp", "", false, true},
+ {"name:/tmp", "", "", "", false, true},
+ {"local/name:/tmp:rw", "", "", "", true, true},
+ }
+
+ for _, c := range cases {
+ conf := &runconfig.Config{}
+ m, err := parseBindMount(c.bind, conf)
+ if c.fail {
+ if err == nil {
+ t.Fatalf("Expected error, was nil, for spec %s\n", c.bind)
+ }
+ continue
+ }
+
+ if m.Destination != c.expDest {
+ t.Fatalf("Expected destination %s, was %s, for spec %s\n", c.expDest, m.Destination, c.bind)
+ }
+
+ if m.Source != c.expSource {
+ t.Fatalf("Expected source %s, was %s, for spec %s\n", c.expSource, m.Source, c.bind)
+ }
+
+ if m.Name != c.expName {
+ t.Fatalf("Expected name %s, was %s for spec %s\n", c.expName, m.Name, c.bind)
+ }
+
+ if m.RW != c.expRW {
+ t.Fatalf("Expected RW %v, was %v for spec %s\n", c.expRW, m.RW, c.bind)
+ }
+ }
+}
diff --git a/daemon/volumes_unit_test.go b/daemon/volumes_unit_test.go
new file mode 100644
index 0000000000..b1e7f72f89
--- /dev/null
+++ b/daemon/volumes_unit_test.go
@@ -0,0 +1,35 @@
+package daemon
+
+import "testing"
+
+func TestParseVolumeFrom(t *testing.T) {
+ cases := []struct {
+ spec string
+ expId string
+ expMode string
+ fail bool
+ }{
+ {"", "", "", true},
+ {"foobar", "foobar", "rw", false},
+ {"foobar:rw", "foobar", "rw", false},
+ {"foobar:ro", "foobar", "ro", false},
+ {"foobar:baz", "", "", true},
+ }
+
+ for _, c := range cases {
+ id, mode, err := parseVolumesFrom(c.spec)
+ if c.fail {
+ if err == nil {
+ t.Fatalf("Expected error, was nil, for spec %s\n", c.spec)
+ }
+ continue
+ }
+
+ if id != c.expId {
+ t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expId, id, c.spec)
+ }
+ if mode != c.expMode {
+ t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec)
+ }
+ }
+}
diff --git a/daemon/volumes_windows.go b/daemon/volumes_windows.go
index ca1199a542..c37ca220d7 100644
--- a/daemon/volumes_windows.go
+++ b/daemon/volumes_windows.go
@@ -2,7 +2,13 @@
package daemon
+import "github.com/docker/docker/daemon/execdriver"
+
// Not supported on Windows
func copyOwnership(source, destination string) error {
return nil
}
+
+func (container *Container) setupMounts() ([]execdriver.Mount, error) {
+ return nil, nil
+}
diff --git a/daemon/wait.go b/daemon/wait.go
new file mode 100644
index 0000000000..1101b2f085
--- /dev/null
+++ b/daemon/wait.go
@@ -0,0 +1,12 @@
+package daemon
+
+import "time"
+
+func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) {
+ container, err := daemon.Get(name)
+ if err != nil {
+ return -1, err
+ }
+
+ return container.WaitStop(timeout)
+}
diff --git a/docker/daemon.go b/docker/daemon.go
index c6241b6060..fc08bc9c26 100644
--- a/docker/daemon.go
+++ b/docker/daemon.go
@@ -7,6 +7,7 @@ import (
"io"
"os"
"path/filepath"
+ "time"
"github.com/Sirupsen/logrus"
apiserver "github.com/docker/docker/api/server"
@@ -14,13 +15,14 @@ import (
"github.com/docker/docker/daemon"
_ "github.com/docker/docker/daemon/execdriver/lxc"
_ "github.com/docker/docker/daemon/execdriver/native"
- "github.com/docker/docker/engine"
"github.com/docker/docker/pkg/homedir"
flag "github.com/docker/docker/pkg/mflag"
+ "github.com/docker/docker/pkg/pidfile"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/timeutils"
"github.com/docker/docker/registry"
+ "github.com/docker/docker/utils"
)
const CanDaemon = true
@@ -31,6 +33,9 @@ var (
)
func init() {
+ if daemonCfg.LogConfig.Config == nil {
+ daemonCfg.LogConfig.Config = make(map[string]string)
+ }
daemonCfg.InstallFlags()
registryCfg.InstallFlags()
}
@@ -76,6 +81,10 @@ func migrateKey() (err error) {
}
func mainDaemon() {
+ if utils.ExperimentalBuild() {
+ logrus.Warn("Running experimental build")
+ }
+
if flag.NArg() != 0 {
flag.Usage()
return
@@ -83,13 +92,19 @@ func mainDaemon() {
logrus.SetFormatter(&logrus.TextFormatter{TimestampFormat: timeutils.RFC3339NanoFixed})
- eng := engine.New()
- signal.Trap(eng.Shutdown)
-
- if err := migrateKey(); err != nil {
- logrus.Fatal(err)
+ var pfile *pidfile.PidFile
+ if daemonCfg.Pidfile != "" {
+ pf, err := pidfile.New(daemonCfg.Pidfile)
+ if err != nil {
+ logrus.Fatalf("Error starting daemon: %v", err)
+ }
+ pfile = pf
+ defer func() {
+ if err := pfile.Remove(); err != nil {
+ logrus.Error(err)
+ }
+ }()
}
- daemonCfg.TrustKeyPath = *flTrustKey
serverConfig := &apiserver.ServerConfig{
Logging: true,
@@ -104,7 +119,7 @@ func mainDaemon() {
TlsKey: *flKey,
}
- api := apiserver.New(serverConfig, eng)
+ api := apiserver.New(serverConfig)
// The serve API routine never exits unless an error occurs
// We need to start it as a goroutine and wait on it so
@@ -119,15 +134,19 @@ func mainDaemon() {
serveAPIWait <- nil
}()
- registryService := registry.NewService(registryCfg)
- d, err := daemon.NewDaemon(daemonCfg, eng, registryService)
- if err != nil {
- eng.Shutdown()
- logrus.Fatalf("Error starting daemon: %v", err)
+ if err := migrateKey(); err != nil {
+ logrus.Fatal(err)
}
+ daemonCfg.TrustKeyPath = *flTrustKey
- if err := d.Install(eng); err != nil {
- eng.Shutdown()
+ registryService := registry.NewService(registryCfg)
+ d, err := daemon.NewDaemon(daemonCfg, registryService)
+ if err != nil {
+ if pfile != nil {
+ if err := pfile.Remove(); err != nil {
+ logrus.Error(err)
+ }
+ }
logrus.Fatalf("Error starting daemon: %v", err)
}
@@ -140,19 +159,52 @@ func mainDaemon() {
"graphdriver": d.GraphDriver().String(),
}).Info("Docker daemon")
+ signal.Trap(func() {
+ api.Close()
+ <-serveAPIWait
+ shutdownDaemon(d, 15)
+ if pfile != nil {
+ if err := pfile.Remove(); err != nil {
+ logrus.Error(err)
+ }
+ }
+ })
+
// after the daemon is done setting up we can tell the api to start
// accepting connections with specified daemon
api.AcceptConnections(d)
// Daemon is fully initialized and handling API traffic
- // Wait for serve API job to complete
+ // Wait for serve API to complete
errAPI := <-serveAPIWait
- eng.Shutdown()
+ shutdownDaemon(d, 15)
if errAPI != nil {
+ if pfile != nil {
+ if err := pfile.Remove(); err != nil {
+ logrus.Error(err)
+ }
+ }
logrus.Fatalf("Shutting down due to ServeAPI error: %v", errAPI)
}
}
+// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case
+// d.Shutdown() is waiting too long to kill container or worst it's
+// blocked there
+func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) {
+ ch := make(chan struct{})
+ go func() {
+ d.Shutdown()
+ close(ch)
+ }()
+ select {
+ case <-ch:
+ logrus.Debug("Clean shutdown succeeded")
+ case <-time.After(timeout * time.Second):
+ logrus.Error("Force shutdown daemon")
+ }
+}
+
// currentUserIsOwner checks whether the current user is the owner of the given
// file.
func currentUserIsOwner(f string) bool {
diff --git a/docker/docker.go b/docker/docker.go
index 1096b840f8..fd40f4b422 100644
--- a/docker/docker.go
+++ b/docker/docker.go
@@ -6,6 +6,7 @@ import (
"fmt"
"io/ioutil"
"os"
+ "runtime"
"strings"
"github.com/Sirupsen/logrus"
@@ -45,15 +46,14 @@ func main() {
if *flLogLevel != "" {
lvl, err := logrus.ParseLevel(*flLogLevel)
if err != nil {
- logrus.Fatalf("Unable to parse logging level: %s", *flLogLevel)
+ fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", *flLogLevel)
+ os.Exit(1)
}
setLogLevel(lvl)
} else {
setLogLevel(logrus.InfoLevel)
}
- // -D, --debug, -l/--log-level=debug processing
- // When/if -D is removed this block can be deleted
if *flDebug {
os.Setenv("DEBUG", "1")
setLogLevel(logrus.DebugLevel)
@@ -62,12 +62,22 @@ func main() {
if len(flHosts) == 0 {
defaultHost := os.Getenv("DOCKER_HOST")
if defaultHost == "" || *flDaemon {
- // If we do not have a host, default to unix socket
- defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
+ if runtime.GOOS != "windows" {
+ // If we do not have a host, default to unix socket
+ defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
+ } else {
+ // If we do not have a host, default to TCP socket on Windows
+ defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
+ }
}
defaultHost, err := opts.ValidateHost(defaultHost)
if err != nil {
- logrus.Fatal(err)
+ if *flDaemon {
+ logrus.Fatal(err)
+ } else {
+ fmt.Fprint(os.Stderr, err)
+ }
+ os.Exit(1)
}
flHosts = append(flHosts, defaultHost)
}
@@ -84,7 +94,8 @@ func main() {
}
if len(flHosts) > 1 {
- logrus.Fatal("Please specify only one -H")
+ fmt.Fprintln(os.Stderr, "Please specify only one -H")
+ os.Exit(1)
}
protoAddrParts := strings.SplitN(flHosts[0], "://", 2)
@@ -105,7 +116,8 @@ func main() {
certPool := x509.NewCertPool()
file, err := ioutil.ReadFile(*flCa)
if err != nil {
- logrus.Fatalf("Couldn't read ca cert %s: %s", *flCa, err)
+ fmt.Fprintf(os.Stderr, "Couldn't read ca cert %s: %s\n", *flCa, err)
+ os.Exit(1)
}
certPool.AppendCertsFromPEM(file)
tlsConfig.RootCAs = certPool
@@ -120,7 +132,8 @@ func main() {
*flTls = true
cert, err := tls.LoadX509KeyPair(*flCert, *flKey)
if err != nil {
- logrus.Fatalf("Couldn't load X509 key pair: %q. Make sure the key is encrypted", err)
+ fmt.Fprintf(os.Stderr, "Couldn't load X509 key pair: %q. Make sure the key is encrypted\n", err)
+ os.Exit(1)
}
tlsConfig.Certificates = []tls.Certificate{cert}
}
@@ -137,11 +150,13 @@ func main() {
if err := cli.Cmd(flag.Args()...); err != nil {
if sterr, ok := err.(client.StatusError); ok {
if sterr.Status != "" {
- logrus.Println(sterr.Status)
+ fmt.Fprintln(cli.Err(), sterr.Status)
+ os.Exit(1)
}
os.Exit(sterr.StatusCode)
}
- logrus.Fatal(err)
+ fmt.Fprintln(cli.Err(), err)
+ os.Exit(1)
}
}
diff --git a/docker/flags.go b/docker/flags.go
index 7f0c10d2d3..cbdb6a859d 100644
--- a/docker/flags.go
+++ b/docker/flags.go
@@ -5,15 +5,69 @@ import (
"os"
"path/filepath"
"runtime"
+ "sort"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/homedir"
flag "github.com/docker/docker/pkg/mflag"
)
+type command struct {
+ name string
+ description string
+}
+
+type byName []command
+
+func (a byName) Len() int { return len(a) }
+func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byName) Less(i, j int) bool { return a[i].name < a[j].name }
+
var (
dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
dockerTlsVerify = os.Getenv("DOCKER_TLS_VERIFY") != ""
+
+ dockerCommands = []command{
+ {"attach", "Attach to a running container"},
+ {"build", "Build an image from a Dockerfile"},
+ {"commit", "Create a new image from a container's changes"},
+ {"cp", "Copy files/folders from a container's filesystem to the host path"},
+ {"create", "Create a new container"},
+ {"diff", "Inspect changes on a container's filesystem"},
+ {"events", "Get real time events from the server"},
+ {"exec", "Run a command in a running container"},
+ {"export", "Stream the contents of a container as a tar archive"},
+ {"history", "Show the history of an image"},
+ {"images", "List images"},
+ {"import", "Create a new filesystem image from the contents of a tarball"},
+ {"info", "Display system-wide information"},
+ {"inspect", "Return low-level information on a container or image"},
+ {"kill", "Kill a running container"},
+ {"load", "Load an image from a tar archive"},
+ {"login", "Register or log in to a Docker registry server"},
+ {"logout", "Log out from a Docker registry server"},
+ {"logs", "Fetch the logs of a container"},
+ {"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"},
+ {"pause", "Pause all processes within a container"},
+ {"ps", "List containers"},
+ {"pull", "Pull an image or a repository from a Docker registry server"},
+ {"push", "Push an image or a repository to a Docker registry server"},
+ {"rename", "Rename an existing container"},
+ {"restart", "Restart a running container"},
+ {"rm", "Remove one or more containers"},
+ {"rmi", "Remove one or more images"},
+ {"run", "Run a command in a new container"},
+ {"save", "Save an image to a tar archive"},
+ {"search", "Search for an image on the Docker Hub"},
+ {"start", "Start a stopped container"},
+ {"stats", "Display a stream of a containers' resource usage statistics"},
+ {"stop", "Stop a running container"},
+ {"tag", "Tag an image into a repository"},
+ {"top", "Lookup the running processes of a container"},
+ {"unpause", "Unpause a paused container"},
+ {"version", "Show the Docker version information"},
+ {"wait", "Block until a container stops, then print its exit code"},
+ }
)
func init() {
@@ -75,49 +129,12 @@ func init() {
help := "\nCommands:\n"
- for _, command := range [][]string{
- {"attach", "Attach to a running container"},
- {"build", "Build an image from a Dockerfile"},
- {"commit", "Create a new image from a container's changes"},
- {"cp", "Copy files/folders from a container's filesystem to the host path"},
- {"create", "Create a new container"},
- {"diff", "Inspect changes on a container's filesystem"},
- {"events", "Get real time events from the server"},
- {"exec", "Run a command in a running container"},
- {"export", "Stream the contents of a container as a tar archive"},
- {"history", "Show the history of an image"},
- {"images", "List images"},
- {"import", "Create a new filesystem image from the contents of a tarball"},
- {"info", "Display system-wide information"},
- {"inspect", "Return low-level information on a container or image"},
- {"kill", "Kill a running container"},
- {"load", "Load an image from a tar archive"},
- {"login", "Register or log in to a Docker registry server"},
- {"logout", "Log out from a Docker registry server"},
- {"logs", "Fetch the logs of a container"},
- {"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"},
- {"pause", "Pause all processes within a container"},
- {"ps", "List containers"},
- {"pull", "Pull an image or a repository from a Docker registry server"},
- {"push", "Push an image or a repository to a Docker registry server"},
- {"rename", "Rename an existing container"},
- {"restart", "Restart a running container"},
- {"rm", "Remove one or more containers"},
- {"rmi", "Remove one or more images"},
- {"run", "Run a command in a new container"},
- {"save", "Save an image to a tar archive"},
- {"search", "Search for an image on the Docker Hub"},
- {"start", "Start a stopped container"},
- {"stats", "Display a stream of a containers' resource usage statistics"},
- {"stop", "Stop a running container"},
- {"tag", "Tag an image into a repository"},
- {"top", "Lookup the running processes of a container"},
- {"unpause", "Unpause a paused container"},
- {"version", "Show the Docker version information"},
- {"wait", "Block until a container stops, then print its exit code"},
- } {
- help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1])
+ sort.Sort(byName(dockerCommands))
+
+ for _, cmd := range dockerCommands {
+ help += fmt.Sprintf(" %-10.10s%s\n", cmd.name, cmd.description)
}
+
help += "\nRun 'docker COMMAND --help' for more information on a command."
fmt.Fprintf(os.Stdout, "%s\n", help)
}
diff --git a/docs/Dockerfile b/docs/Dockerfile
index e30d4bbd54..bda3ccd9e9 100644
--- a/docs/Dockerfile
+++ b/docs/Dockerfile
@@ -8,8 +8,9 @@ MAINTAINER Sven Dowideit (@SvenDowideit)
# sub project
ENV COMPOSE_BRANCH release
ENV SWARM_BRANCH v0.2.0
-ENV MACHINE_BRANCH master
-ENV DISTRIB_BRANCH release/2.0
+ENV MACHINE_BRANCH docs
+ENV DISTRIB_BRANCH docs
+ENV KITEMATIC_BRANCH master
# TODO: need the full repo source to get the git version info
@@ -61,7 +62,14 @@ ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs
RUN sed -i.old '1s;^;no_version_dropdown: true;' \
/docs/sources/registry/*.md \
/docs/sources/registry/spec/*.md \
- /docs/sources/registry/spec/auth/*.md
+ /docs/sources/registry/spec/auth/*.md \
+ /docs/sources/registry/storage-drivers/*.md
+
+RUN sed -i.old -e '/^/g'\
+ /docs/sources/registry/*.md \
+ /docs/sources/registry/spec/*.md \
+ /docs/sources/registry/spec/auth/*.md \
+ /docs/sources/registry/storage-drivers/*.md
#######################
# Docker Swarm
@@ -109,5 +117,47 @@ ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/inde
RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/*.md
+#######################
+# Kitematic
+#######################
+ADD https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/faq.md \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/index.md \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/known-issues.md \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/minecraft-server.md \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/nginx-web-server.md \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/rethinkdb-dev-database.md \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/userguide.md \
+ /docs/sources/kitematic/
+RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/kitematic/*.md
+ADD https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/browse-images.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/change-folder.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/cli-access-button.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/cli-redis-container.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/cli-terminal.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/containers.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/installing.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-add-server.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-create.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-data-volume.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-login.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-map.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-port.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-restart.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-server-address.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-2048-files.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-2048.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-create.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-data-folder.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-data-volume.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-hello-world.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-preview.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-serving-2048.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/rethink-container.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/rethink-create.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/rethink-ports.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/rethinkdb-preview.png \
+ https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/volumes-dir.png \
+ /docs/sources/kitematic/assets/
+
# Then build everything together, ready for mkdocs
RUN /docs/build.sh
diff --git a/docs/man/docker-build.1.md b/docs/man/docker-build.1.md
index 4a8eba67df..7a5ceab0e2 100644
--- a/docs/man/docker-build.1.md
+++ b/docs/man/docker-build.1.md
@@ -17,8 +17,11 @@ docker-build - Build a new image from the source code at PATH
[**-m**|**--memory**[=*MEMORY*]]
[**--memory-swap**[=*MEMORY-SWAP*]]
[**-c**|**--cpu-shares**[=*0*]]
+[**--cpu-period**[=*0*]]
[**--cpu-quota**[=*0*]]
[**--cpuset-cpus**[=*CPUSET-CPUS*]]
+[**--cpuset-mems**[=*CPUSET-MEMS*]]
+[**--cgroup-parent**[=*CGROUP-PARENT*]]
PATH | URL | -
@@ -63,6 +66,77 @@ as context.
**-t**, **--tag**=""
Repository name (and optionally a tag) to be applied to the resulting image in case of success
+**-m**, **--memory**=*MEMORY*
+ Memory limit
+
+**--memory-swap**=*MEMORY-SWAP*
+ Total memory (memory + swap), '-1' to disable swap.
+
+**-c**, **--cpu-shares**=*0*
+ CPU shares (relative weight).
+
+ By default, all containers get the same proportion of CPU cycles. You can
+ change this proportion by adjusting the container's CPU share weighting
+ relative to the weighting of all other running containers.
+
+ To modify the proportion from the default of 1024, use the **-c** or
+ **--cpu-shares** flag to set the weighting to 2 or higher.
+
+ The proportion is only applied when CPU-intensive processes are running.
+ When tasks in one container are idle, the other containers can use the
+ left-over CPU time. The actual amount of CPU time used varies depending on
+ the number of containers running on the system.
+
+ For example, consider three containers, one has a cpu-share of 1024 and
+ two others have a cpu-share setting of 512. When processes in all three
+ containers attempt to use 100% of CPU, the first container would receive
+ 50% of the total CPU time. If you add a fourth container with a cpu-share
+ of 1024, the first container only gets 33% of the CPU. The remaining containers
+ receive 16.5%, 16.5% and 33% of the CPU.
+
+ On a multi-core system, the shares of CPU time are distributed across the CPU
+ cores. Even if a container is limited to less than 100% of CPU time, it can
+ use 100% of each individual CPU core.
+
+ For example, consider a system with more than three cores. If you start one
+ container **{C0}** with **-c=512** running one process, and another container
+ **{C1}** with **-c=1024** running two processes, this can result in the following
+ division of CPU shares:
+
+ PID container CPU CPU share
+ 100 {C0} 0 100% of CPU0
+ 101 {C1} 1 100% of CPU1
+ 102 {C1} 2 100% of CPU2
+
+**--cpu-period**=*0*
+ Limit the CPU CFS (Completely Fair Scheduler) period.
+
+ Limit the container's CPU usage. This flag causes the kernel to restrict the
+ container's CPU usage to the period you specify.
+
+**--cpu-quota**=*0*
+ Limit the CPU CFS (Completely Fair Scheduler) quota.
+
+ By default, containers run with the full CPU resource. This flag causes the
+kernel to restrict the container's CPU usage to the quota you specify.
+
+**--cpuset-cpus**=*CPUSET-CPUS*
+ CPUs in which to allow execution (0-3, 0,1).
+
+**--cpuset-mems**=*CPUSET-MEMS*
+ Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on
+ NUMA systems.
+
+ For example, if you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1`
+to ensure the processes in your Docker container only use memory from the first
+two memory nodes.
+
+**--cgroup-parent**=*CGROUP-PARENT*
+ Path to `cgroups` under which the container's `cgroup` is created.
+
+ If the path is not absolute, the path is considered relative to the `cgroups` path of the init process.
+Cgroups are created if they do not already exist.
+
# EXAMPLES
## Building an image using a Dockerfile located inside the current directory
diff --git a/docs/man/docker-create.1.md b/docs/man/docker-create.1.md
index 7aba222b29..26b6711309 100644
--- a/docs/man/docker-create.1.md
+++ b/docs/man/docker-create.1.md
@@ -8,10 +8,12 @@ docker-create - Create a new container
**docker create**
[**-a**|**--attach**[=*[]*]]
[**--add-host**[=*[]*]]
+[**--blkio-weight**[=*[BLKIO-WEIGHT]*]]
[**-c**|**--cpu-shares**[=*0*]]
[**--cap-add**[=*[]*]]
[**--cap-drop**[=*[]*]]
[**--cidfile**[=*CIDFILE*]]
+[**--cpu-period**[=*0*]]
[**--cpuset-cpus**[=*CPUSET-CPUS*]]
[**--cpuset-mems**[=*CPUSET-MEMS*]]
[**--cpu-quota**[=*0*]]
@@ -36,9 +38,11 @@ docker-create - Create a new container
[**--mac-address**[=*MAC-ADDRESS*]]
[**--name**[=*NAME*]]
[**--net**[=*"bridge"*]]
+[**--oom-kill-disable**[=*false*]]
[**-P**|**--publish-all**[=*false*]]
[**-p**|**--publish**[=*[]*]]
[**--pid**[=*[]*]]
+[**--uts**[=*[]*]]
[**--privileged**[=*false*]]
[**--read-only**[=*false*]]
[**--restart**[=*RESTART*]]
@@ -58,6 +62,9 @@ IMAGE [COMMAND] [ARG...]
**--add-host**=[]
Add a custom host-to-IP mapping (host:ip)
+**--blkio-weight**=0
+ Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+
**-c**, **--cpu-shares**=0
CPU shares (relative weight)
@@ -73,6 +80,9 @@ IMAGE [COMMAND] [ARG...]
**--cgroup-parent**=""
Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
+**--cpu-period**=0
+ Limit the CPU CFS (Completely Fair Scheduler) period
+
**--cpuset-cpus**=""
CPUs in which to allow execution (0-3, 0,1)
@@ -128,7 +138,8 @@ two memory nodes.
Read labels from a file. Delimit each label with an EOL.
**--link**=[]
- Add link to another container in the form of :alias
+ Add link to another container in the form of <name or id>:alias or just
+ <name or id> in which case the alias will match the name.
**--lxc-conf**=[]
(lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
@@ -150,7 +161,7 @@ system's page size (the value would be very large, that's millions of trillions)
Total memory limit (memory + swap)
Set `-1` to disable swap (format: , where unit = b, k, m or g).
-This value should always larger than **-m**, so you should alway use this with **-m**.
+This value should always be larger than **-m**, so you should always use this with **-m**.
**--mac-address**=""
Container MAC address (e.g. 92:d0:c6:0a:29:33)
@@ -165,6 +176,9 @@ This value should always larger than **-m**, so you should alway use this with *
'container:': reuses another container network stack
'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
+**--oom-kill-disable**=*true*|*false*
+ Whether to disable OOM Killer for the container or not.
+
**-P**, **--publish-all**=*true*|*false*
Publish all exposed ports to random ports on the host interfaces. The default is *false*.
@@ -180,6 +194,11 @@ This value should always larger than **-m**, so you should alway use this with *
**host**: use the host's PID namespace inside the container.
Note: the host mode gives the container full access to local PID and is therefore considered insecure.
+**--uts**=host
+ Set the UTS mode for the container
+ **host**: use the host's UTS namespace inside the container.
+ Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure.
+
**--privileged**=*true*|*false*
Give extended privileges to this container. The default is *false*.
diff --git a/docs/man/docker-exec.1.md b/docs/man/docker-exec.1.md
index 312fa397f5..c1de7b59ed 100644
--- a/docs/man/docker-exec.1.md
+++ b/docs/man/docker-exec.1.md
@@ -9,7 +9,6 @@ docker-exec - Run a command in a running container
[**-d**|**--detach**[=*false*]]
[**--help**]
[**-i**|**--interactive**[=*false*]]
-[**--privileged**[=*false*]]
[**-t**|**--tty**[=*false*]]
[**-u**|**--user**[=*USER*]]
CONTAINER COMMAND [ARG...]
@@ -34,13 +33,6 @@ container is unpaused, and then run
**-i**, **--interactive**=*true*|*false*
Keep STDIN open even if not attached. The default is *false*.
-**--privileged**=*true*|*false*
- Give extended privileges to the process to run in a running container. The default is *false*.
-
- By default, the process run by docker exec in a running container
-have the same capabilities of the container. By setting --privileged will give
-all the capabilities to the process.
-
**-t**, **--tty**=*true*|*false*
Allocate a pseudo-TTY. The default is *false*.
diff --git a/docs/man/docker-images.1.md b/docs/man/docker-images.1.md
index c5151f1107..16dd864767 100644
--- a/docs/man/docker-images.1.md
+++ b/docs/man/docker-images.1.md
@@ -66,6 +66,11 @@ used in builds use **-a**:
docker images -a
+Previously, the docker images command supported the --tree and --dot arguments,
+which displayed different visualizations of the image data. Docker core removed
+this functionality in the 1.7 version. If you liked this functionality, you can
+still find it in the third-party dockviz tool: https://github.com/justone/dockviz.
+
## Listing only the shortened image IDs
Listing just the shortened image IDs. This can be useful for some automated
diff --git a/docs/man/docker-logs.1.md b/docs/man/docker-logs.1.md
index 01a15f54dc..e2cacea223 100644
--- a/docs/man/docker-logs.1.md
+++ b/docs/man/docker-logs.1.md
@@ -8,6 +8,7 @@ docker-logs - Fetch the logs of a container
**docker logs**
[**-f**|**--follow**[=*false*]]
[**--help**]
+[**--since**[=*SINCE*]]
[**-t**|**--timestamps**[=*false*]]
[**--tail**[=*"all"*]]
CONTAINER
@@ -31,6 +32,9 @@ then continue streaming new output from the container’s stdout and stderr.
**-f**, **--follow**=*true*|*false*
Follow log output. The default is *false*.
+**--since**=""
+ Show logs since timestamp
+
**-t**, **--timestamps**=*true*|*false*
Show timestamps. The default is *false*.
@@ -42,3 +46,4 @@ April 2014, Originally compiled by William Henry (whenry at redhat dot com)
based on docker.com source material and internal work.
June 2014, updated by Sven Dowideit
July 2014, updated by Sven Dowideit
+April 2015, updated by Ahmet Alp Balkan
diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md
index f2ce4b7774..eec0f1cefc 100644
--- a/docs/man/docker-run.1.md
+++ b/docs/man/docker-run.1.md
@@ -8,10 +8,12 @@ docker-run - Run a command in a new container
**docker run**
[**-a**|**--attach**[=*[]*]]
[**--add-host**[=*[]*]]
+[**--blkio-weight**[=*[BLKIO-WEIGHT]*]]
[**-c**|**--cpu-shares**[=*0*]]
[**--cap-add**[=*[]*]]
[**--cap-drop**[=*[]*]]
[**--cidfile**[=*CIDFILE*]]
+[**--cpu-period**[=*0*]]
[**--cpuset-cpus**[=*CPUSET-CPUS*]]
[**--cpuset-mems**[=*CPUSET-MEMS*]]
[**-d**|**--detach**[=*false*]]
@@ -37,9 +39,11 @@ docker-run - Run a command in a new container
[**--mac-address**[=*MAC-ADDRESS*]]
[**--name**[=*NAME*]]
[**--net**[=*"bridge"*]]
+[**--oom-kill-disable**[=*false*]]
[**-P**|**--publish-all**[=*false*]]
[**-p**|**--publish**[=*[]*]]
[**--pid**[=*[]*]]
+[**--uts**[=*[]*]]
[**--privileged**[=*false*]]
[**--read-only**[=*false*]]
[**--restart**[=*RESTART*]]
@@ -85,6 +89,9 @@ each of stdin, stdout, and stderr.
Add a line to /etc/hosts. The format is hostname:ip. The **--add-host**
option can be set multiple times.
+**--blkio-weight**=0
+ Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+
**-c**, **--cpu-shares**=0
CPU shares (relative weight)
@@ -133,6 +140,11 @@ division of CPU shares:
**--cidfile**=""
Write the container ID to the file
+**--cpu-period**=0
+ Limit the CPU CFS (Completely Fair Scheduler) period
+
+ Limit the container's CPU usage. This flag tells the kernel to restrict the container's CPU usage to the period you specify.
+
**--cpuset-cpus**=""
CPUs in which to allow execution (0-3, 0,1)
@@ -227,7 +239,8 @@ ENTRYPOINT.
Read in a line delimited file of labels
**--link**=[]
- Add link to another container in the form of :alias
+ Add link to another container in the form of <name or id>:alias or just
+<name or id> in which case the alias will match the name
If the operator
uses **--link** when starting the new client container, then the client
@@ -285,6 +298,9 @@ and foreground Docker containers.
'container:': reuses another container network stack
'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
+**--oom-kill-disable**=*true*|*false*
+ Whether to disable OOM Killer for the container or not.
+
**-P**, **--publish-all**=*true*|*false*
Publish all exposed ports to random ports on the host interfaces. The default is *false*.
@@ -308,6 +324,11 @@ ports and the exposed ports, use `docker port`.
**host**: use the host's PID namespace inside the container.
Note: the host mode gives the container full access to local PID and is therefore considered insecure.
+**--uts**=host
+ Set the UTS mode for the container
+ **host**: use the host's UTS namespace inside the container.
+ Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure.
+
**--privileged**=*true*|*false*
Give extended privileges to this container. The default is *false*.
diff --git a/docs/man/docker-stats.1.md b/docs/man/docker-stats.1.md
index f6fc3f7f23..4b48588559 100644
--- a/docs/man/docker-stats.1.md
+++ b/docs/man/docker-stats.1.md
@@ -17,6 +17,9 @@ Display a live stream of one or more containers' resource usage statistics
**--help**
Print usage statement
+**--no-stream**="false"
+ Disable streaming stats and only pull the first result
+
# EXAMPLES
Run **docker stats** with multiple containers.
diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md
index 4e7cafe466..884f181ca3 100644
--- a/docs/man/docker.1.md
+++ b/docs/man/docker.1.md
@@ -53,6 +53,12 @@ To see the man page for a command run **man docker **.
**-e**, **--exec-driver**=""
Force Docker to use specific exec driver. Default is `native`.
+**--exec-opt**=[]
+ Set exec driver options. See EXEC DRIVER OPTIONS.
+
+**--exec-root**=""
+ Path to use as the root of the Docker execdriver. Default is `/var/run/docker`.
+
**--fixed-cidr**=""
IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip)
@@ -111,6 +117,9 @@ unix://[/path/to/socket] to use.
**-s**, **--storage-driver**=""
Force the Docker runtime to use a specific storage driver.
+**--selinux-enabled**=*true*|*false*
+ Enable selinux support. Default is false. SELinux does not presently support the BTRFS storage driver.
+
**--storage-opt**=[]
Set storage driver options. See STORAGE DRIVER OPTIONS.
@@ -121,15 +130,12 @@ unix://[/path/to/socket] to use.
Use TLS and verify the remote (daemon: verify client, client: verify daemon).
Default is false.
+**--userland-proxy**=*true*|*false*
+ Rely on a userland proxy implementation for inter-container and outside-to-container loopback communications. Default is true.
+
**-v**, **--version**=*true*|*false*
Print version information and quit. Default is false.
-**--exec-opt**=[]
- Set exec driver options. See EXEC DRIVER OPTIONS.
-
-**--selinux-enabled**=*true*|*false*
- Enable selinux support. Default is false. SELinux does not presently support the BTRFS storage driver.
-
# COMMANDS
**attach**
Attach to a running container
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index fb08e289e1..73018a5b73 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -25,14 +25,22 @@ pages:
# Introduction:
- ['index.md', 'About', 'Docker']
-- ['release-notes.md', 'About', 'Release notes']
-- ['introduction/index.md', '**HIDDEN**']
- ['introduction/understanding-docker.md', 'About', 'Understanding Docker']
+- ['release-notes.md', 'About', 'Release notes']
+# Experimental
+- ['experimental/experimental.md', 'About', 'Experimental Features']
+- ['experimental/plugin_api.md', '**HIDDEN**']
+- ['experimental/plugins_volume.md', '**HIDDEN**']
+- ['experimental/plugins.md', '**HIDDEN**']
+- ['reference/glossary.md', 'About', 'Glossary']
+- ['introduction/index.md', '**HIDDEN**']
+
# Installation:
- ['installation/index.md', '**HIDDEN**']
- ['installation/ubuntulinux.md', 'Installation', 'Ubuntu']
- ['installation/mac.md', 'Installation', 'Mac OS X']
+- ['kitematic/index.md', 'Installation', 'Kitematic on OS X']
- ['installation/windows.md', 'Installation', 'Microsoft Windows']
- ['installation/testing-windows-docker-client.md', 'Installation', 'Building and testing the Windows Docker client']
- ['installation/amazon.md', 'Installation', 'Amazon EC2']
@@ -71,11 +79,13 @@ pages:
- ['compose/extends.md', 'User Guide', ' ▪ Extend Compose services' ]
- ['machine/index.md', 'User Guide', 'Docker Machine' ]
- ['swarm/index.md', 'User Guide', 'Docker Swarm' ]
+- ['kitematic/userguide.md', 'User Guide', 'Kitematic']
# Docker Hub docs:
- ['docker-hub/index.md', 'Docker Hub', 'Docker Hub' ]
- ['docker-hub/accounts.md', 'Docker Hub', 'Accounts']
-- ['docker-hub/repos.md', 'Docker Hub', 'Repositories']
+- ['docker-hub/userguide.md', 'Docker Hub', 'User Guide']
+- ['docker-hub/repos.md', 'Docker Hub', 'Your Repositories']
- ['docker-hub/builds.md', 'Docker Hub', 'Automated Builds']
- ['docker-hub/official_repos.md', 'Docker Hub', 'Official Repositories']
@@ -87,6 +97,7 @@ pages:
- ['docker-hub-enterprise/install.md', 'Docker Hub Enterprise', ' Installation' ]
- ['docker-hub-enterprise/configuration.md', 'Docker Hub Enterprise', ' Configuration options' ]
- ['docker-hub-enterprise/support.md', 'Docker Hub Enterprise', 'Support' ]
+- ['docker-hub-enterprise/release-notes.md', 'Docker Hub Enterprise', 'Release notes' ]
# Examples:
- ['examples/index.md', '**HIDDEN**']
@@ -101,6 +112,9 @@ pages:
- ['compose/django.md', 'Examples', 'Getting started with Compose and Django']
- ['compose/rails.md', 'Examples', 'Getting started with Compose and Rails']
- ['compose/wordpress.md', 'Examples', 'Getting started with Compose and Wordpress']
+- ['kitematic/minecraft-server.md', 'Examples', 'Kitematic: Minecraft server']
+- ['kitematic/nginx-web-server.md', 'Examples', 'Kitematic: Nginx web server']
+- ['kitematic/rethinkdb-dev-database.md', 'Examples', 'Kitematic: RethinkDB development database']
# Articles
- ['articles/index.md', '**HIDDEN**']
@@ -131,6 +145,7 @@ pages:
- ['reference/builder.md', 'Reference', 'Dockerfile']
- ['faq.md', 'Reference', 'FAQ']
- ['reference/run.md', 'Reference', 'Run reference']
+- ['reference/logging/journald.md', '**HIDDEN**']
- ['compose/cli.md', 'Reference', 'Compose command line']
- ['compose/yml.md', 'Reference', 'Compose yml']
- ['compose/env.md', 'Reference', 'Compose ENV variables']
@@ -155,6 +170,7 @@ pages:
- ['reference/api/docker-io_api.md', 'Reference', 'Docker Hub API']
#- ['reference/image-spec-v1.md', 'Reference', 'Docker Image Specification v1.0.0']
- ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API']
+- ['reference/api/docker_remote_api_v1.19.md', 'Reference', 'Docker Remote API v1.19']
- ['reference/api/docker_remote_api_v1.18.md', 'Reference', 'Docker Remote API v1.18']
- ['reference/api/docker_remote_api_v1.17.md', 'Reference', 'Docker Remote API v1.17']
- ['reference/api/docker_remote_api_v1.16.md', 'Reference', 'Docker Remote API v1.16']
@@ -176,6 +192,8 @@ pages:
- ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**']
- ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API client libraries']
- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker Hub accounts API']
+- ['kitematic/faq.md', 'Reference', 'Kitematic: FAQ']
+- ['kitematic/known-issues.md', 'Reference', 'Kitematic: Known issues']
# Hidden registry files
- ['registry/storage-drivers/azure.md', '**HIDDEN**' ]
diff --git a/docs/sources/articles/configuring.md b/docs/sources/articles/configuring.md
index 35d0eb8e58..7bd92417cd 100644
--- a/docs/sources/articles/configuring.md
+++ b/docs/sources/articles/configuring.md
@@ -1,54 +1,74 @@
-page_title: Configuring Docker
-page_description: Configuring the Docker daemon on various distributions
-page_keywords: docker, daemon, configuration
+page_title: Configuring and running Docker
+page_description: Configuring and running the Docker daemon on various distributions
+page_keywords: docker, daemon, configuration, running, process managers
-# Configuring Docker on various distributions
+# Configuring and running Docker on various distributions
-After successfully installing Docker, the `docker` daemon runs with it's default
-configuration. You can configure the `docker` daemon by passing configuration
-flags to it directly when you start it.
+After successfully installing Docker, the `docker` daemon runs with its default
+configuration.
In a production environment, system administrators typically configure the
-`docker` daemon to start and stop according to an organization's requirements. In most
+`docker` daemon to start and stop according to an organization's requirements. In most
cases, the system administrator configures a process manager such as `SysVinit`, `Upstart`,
or `systemd` to manage the `docker` daemon's start and stop.
+### Running the docker daemon directly
+
+The `docker` daemon can be run directly using the `-d` option. By default it listens on
+the Unix socket `unix:///var/run/docker.sock`
+
+ $ docker -d
+
+ INFO[0000] +job init_networkdriver()
+ INFO[0000] +job serveapi(unix:///var/run/docker.sock)
+ INFO[0000] Listening for HTTP on unix (/var/run/docker.sock)
+ ...
+ ...
+
+### Configuring the docker daemon directly
+
+If you're running the `docker` daemon directly by running `docker -d` instead
+of using a process manager, you can append the configuration options to the `docker` run
+command directly. Just like the `-d` option, other options can be passed to the `docker`
+daemon to configure it.
+
Some of the daemon's options are:
| Flag | Description |
|-----------------------|-----------------------------------------------------------|
-| `-D`, `--debug=false` | Enable or disable debug mode. By default, this is false. |
+| `-D`, `--debug=false` | Enable or disable debug mode. By default, this is false. |
| `-H`,`--host=[]` | Daemon socket(s) to connect to. |
| `--tls=false` | Enable or disable TLS. By default, this is false. |
-The command line reference has the [complete list of daemon flags](/reference/commandline/cli/#daemon).
-## Direct Configuration
+Here is an example of running the `docker` daemon with configuration options:
-If you're running the `docker` daemon directly by running `docker -d` instead of using a process manager,
-you can append the config options to the run command directly.
+ $ docker -d -D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376
+These options :
-Here is a an example of running the `docker` daemon with config options:
-
- docker -d -D --tls=false -H tcp://0.0.0.0:2375
-
-These options :
-
-- Enable `-D` (debug) mode
-- Set `tls` to false
-- Listen for connections on `tcp://0.0.0.0:2375`
+- Enable `-D` (debug) mode
+- Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively
+- Listen for connections on `tcp://192.168.59.3:2376`
+The command line reference has the [complete list of daemon flags](/reference/commandline/cli/#daemon)
+with explanations.
## Ubuntu
-After successfully [installing Docker for Ubuntu](/installation/ubuntulinux/), you can check the
-running status using Upstart in this way:
+As of `14.04`, Ubuntu uses Upstart as a process manager. By default, Upstart jobs
+are located in `/etc/init` and the `docker` Upstart job can be found at `/etc/init/docker.conf`.
+
+After successfully [installing Docker for Ubuntu](/installation/ubuntulinux/),
+you can check the running status using Upstart in this way:
$ sudo status docker
+
docker start/running, process 989
-You can start/stop/restart `docker` using
+### Running Docker
+
+You can start/stop/restart the `docker` daemon using
$ sudo start docker
@@ -60,39 +80,159 @@ You can start/stop/restart `docker` using
### Configuring Docker
You configure the `docker` daemon in the `/etc/default/docker` file on your
-system. You do this by specifying values in a `DOCKER_OPTS` variable.
+system. You do this by specifying values in a `DOCKER_OPTS` variable.
+
To configure Docker options:
-1. Log into your system as a user with `sudo` or `root` privileges.
+1. Log into your host as a user with `sudo` or `root` privileges.
-2. If you don't have one, create the `/etc/default/docker` file in your system.
-
- Depending on how you installed Docker, you may already have this file.
+2. If you don't have one, create the `/etc/default/docker` file on your host. Depending on how
+you installed Docker, you may already have this file.
3. Open the file with your favorite editor.
- $ sudo vi /etc/default/docker
-
+ ```
+ $ sudo vi /etc/default/docker
+ ```
+
4. Add a `DOCKER_OPTS` variable with the following options. These options are appended to the
`docker` daemon's run command.
- ```
- DOCKER_OPTS=" --dns 8.8.8.8 --dns 8.8.4.4 -D --tls=false -H tcp://0.0.0.0:2375 "
- ```
-
-These options :
+```
+ DOCKER_OPTS="-D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376"
+```
+
+These options :
+
+- Enable `-D` (debug) mode
+- Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively
+- Listen for connections on `tcp://192.168.59.3:2376`
+
+The command line reference has the [complete list of daemon flags](/reference/commandline/cli/#daemon)
+with explanations.
+
-- Set `dns` server for all containers
-- Enable `-D` (debug) mode
-- Set `tls` to false
-- Listen for connections on `tcp://0.0.0.0:2375`
-
5. Save and close the file.
6. Restart the `docker` daemon.
- $ sudo restart docker
+ ```
+ $ sudo restart docker
+ ```
-7. Verify that the `docker` daemon is running as specified wit the `ps` command.
+7. Verify that the `docker` daemon is running as specified with the `ps` command.
- $ ps aux | grep docker | grep -v grep
+ ```
+ $ ps aux | grep docker | grep -v grep
+ ```
+
+### Logs
+
+By default logs for Upstart jobs are located in `/var/log/upstart` and the logs for `docker` daemon
+can be located at `/var/log/upstart/docker.log`
+
+ $ tail -f /var/log/upstart/docker.log
+ INFO[0000] Loading containers: done.
+ INFO[0000] docker daemon: 1.6.0 4749651; execdriver: native-0.2; graphdriver: aufs
+ INFO[0000] +job acceptconnections()
+ INFO[0000] -job acceptconnections() = OK (0)
+ INFO[0000] Daemon has completed initialization
+
+
+## CentOS / Red Hat Enterprise Linux / Fedora
+
+As of `7.x`, CentOS and RHEL use `systemd` as the process manager. As of `21`, Fedora uses
+`systemd` as its process manager.
+
+After successfully installing Docker for [CentOS](/installation/centos/)/[Red Hat Enterprise Linux]
+(/installation/rhel/)/[Fedora](/installation/fedora), you can check the running status in this way:
+
+ $ sudo systemctl status docker
+
+### Running Docker
+
+You can start/stop/restart the `docker` daemon using
+
+ $ sudo systemctl start docker
+
+ $ sudo systemctl stop docker
+
+ $ sudo systemctl restart docker
+
+If you want Docker to start at boot, you should also:
+
+ $ sudo systemctl enable docker
+
+### Configuring Docker
+
+You configure the `docker` daemon in the `/etc/sysconfig/docker` file on your
+host. You do this by specifying values in a variable. For CentOS 7.x and RHEL 7.x, the name
+of the variable is `OPTIONS` and for CentOS 6.x and RHEL 6.x, the name of the variable is
+`other_args`. For this section, we will use CentOS 7.x as an example to configure the `docker`
+daemon.
+
+By default, systemd services are located either in `/etc/systemd/service`, `/lib/systemd/system`
+or `/usr/lib/systemd/system`. The `docker.service` file can be found in either of these three
+directories depending on your host.
+
+To configure Docker options:
+
+1. Log into your host as a user with `sudo` or `root` privileges.
+
+2. If you don't have one, create the `/etc/sysconfig/docker` file on your host. Depending on how
+you installed Docker, you may already have this file.
+
+3. Open the file with your favorite editor.
+
+ ```
+ $ sudo vi /etc/sysconfig/docker
+ ```
+
+4. Add a `OPTIONS` variable with the following options. These options are appended to the
+command that starts the `docker` daemon.
+
+```
+ OPTIONS="-D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376"
+```
+
+These options :
+
+- Enable `-D` (debug) mode
+- Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively
+- Listen for connections on `tcp://192.168.59.3:2376`
+
+The command line reference has the [complete list of daemon flags](/reference/commandline/cli/#daemon)
+with explanations.
+
+5. Save and close the file.
+
+6. Restart the `docker` daemon.
+
+ ```
+ $ sudo service docker restart
+ ```
+
+7. Verify that the `docker` daemon is running as specified with the `ps` command.
+
+ ```
+ $ ps aux | grep docker | grep -v grep
+ ```
+
+### Logs
+
+systemd has its own logging system called the journal. The logs for the `docker` daemon can
+be viewed using `journalctl -u docker`
+
+ $ sudo journalctl -u docker
+ May 06 00:22:05 localhost.localdomain systemd[1]: Starting Docker Application Container Engine...
+ May 06 00:22:05 localhost.localdomain docker[2495]: time="2015-05-06T00:22:05Z" level="info" msg="+job serveapi(unix:///var/run/docker.sock)"
+ May 06 00:22:05 localhost.localdomain docker[2495]: time="2015-05-06T00:22:05Z" level="info" msg="Listening for HTTP on unix (/var/run/docker.sock)"
+ May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job init_networkdriver()"
+ May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job init_networkdriver() = OK (0)"
+ May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: start."
+ May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: done."
+ May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="docker daemon: 1.5.0-dev fc0329b/1.5.0; execdriver: native-0.2; graphdriver: devicemapper"
+ May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job acceptconnections()"
+ May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job acceptconnections() = OK (0)"
+
+_Note: Using and configuring journal is an advanced topic and is beyond the scope of this article._
diff --git a/docs/sources/articles/networking.md b/docs/sources/articles/networking.md
index 823b450c75..8400d1a6e3 100644
--- a/docs/sources/articles/networking.md
+++ b/docs/sources/articles/networking.md
@@ -93,6 +93,9 @@ server when it starts up, and cannot be changed once it is running:
* `--mtu=BYTES` — see
[Customizing docker0](#docker0)
+ * `--userland-proxy=true|false` — see
+ [Binding container ports](#binding-ports)
+
There are two networking options that can be supplied either at startup
or when `docker run` is invoked. When provided at startup, set the
default value that `docker run` will later use if the options are not
@@ -399,7 +402,7 @@ machine that the Docker server creates when it starts:
...
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
- MASQUERADE all -- 172.17.0.0/16 !172.17.0.0/16
+ MASQUERADE all -- 172.17.0.0/16 0.0.0.0/0
...
But if you want containers to accept incoming connections, you will need
@@ -452,6 +455,21 @@ address, you can edit your system-wide Docker server settings and add the
option `--ip=IP_ADDRESS`. Remember to restart your Docker server after
editing this setting.
+> **Note**:
+> With hairpin NAT enabled (`--userland-proxy=false`), containers port exposure
+> is achieved purely through iptables rules, and no attempt to bind the exposed
+> port is ever made. This means that nothing prevents shadowing a previously
+> listening service outside of Docker through exposing the same port for a
+> container. In such conflicting situation, Docker created iptables rules will
+> take precedence and route to the container.
+
+The `--userland-proxy` parameter, true by default, provides a userland
+implementation for inter-container and outside-to-container communication. When
+disabled, Docker uses both an additional `MASQUERADE` iptables rule and the
+`net.ipv4.route_localnet` kernel parameter, which allows the host machine to
+connect to a local container's exposed port through the commonly used loopback
+address: this alternative is preferred for performance reasons.
+
Again, this topic is covered without all of these low-level networking
details in the [Docker User Guide](/userguide/dockerlinks/) document if you
would like to use that as your port redirection reference instead.
@@ -574,7 +592,7 @@ Therefore the router thinks it can talk to these containers directly.
As soon as the router wants to send an IPv6 packet to the first container it
will transmit a neighbor solicitation request, asking, who has
-`2001:db8::c009`? But it will get no answer because noone on this subnet has
+`2001:db8::c009`? But it will get no answer because no one on this subnet has
this address. The container with this address is hidden behind the Docker host.
The Docker host has to listen to neighbor solicitation requests for the container
address and send a response that itself is the device that is responsible for
diff --git a/docs/sources/articles/security.md b/docs/sources/articles/security.md
index 42d15e88c0..b3174c3c54 100644
--- a/docs/sources/articles/security.md
+++ b/docs/sources/articles/security.md
@@ -17,8 +17,8 @@ There are three major areas to consider when reviewing Docker security:
## Kernel namespaces
Docker containers are very similar to LXC containers, and they have
-similar security features. When you start a container with `docker
-run`, behind the scenes Docker creates a set of namespaces and control
+similar security features. When you start a container with
+`docker run`, behind the scenes Docker creates a set of namespaces and control
groups for the container.
**Namespaces provide the first and most straightforward form of
@@ -103,7 +103,7 @@ Docker directly on your local machine, outside of a VM). You can then
use traditional UNIX permission checks to limit access to the control
socket.
-You can also expose the REST API over HTTP if you explicitly decide so.
+You can also expose the REST API over HTTP if you explicitly decide to do so.
However, if you do that, being aware of the above mentioned security
implication, you should ensure that it will be reachable only from a
trusted network or VPN; or protected with e.g., `stunnel` and client SSL
@@ -253,7 +253,7 @@ an artificial capabilities set. Likewise, however, this artificial
capabilities set may require use of 'capsh' to restrict the
user-namespace capabilities set when using 'unshare'.
-Eventually, it is expected that Docker will direct, native support
+Eventually, it is expected that Docker will have direct, native support
for user-namespaces, simplifying the process of hardening containers.
## Conclusions
diff --git a/docs/sources/articles/systemd.md b/docs/sources/articles/systemd.md
index c4c0d2c81d..18631ee2cf 100644
--- a/docs/sources/articles/systemd.md
+++ b/docs/sources/articles/systemd.md
@@ -30,8 +30,8 @@ If the `docker.service` file is set to use an `EnvironmentFile`
(often pointing to `/etc/sysconfig/docker`) then you can modify the
referenced file.
-Or, you may need to edit the `docker.service` file, which can be in `/usr/lib/systemd/system`
-or `/etc/systemd/service`.
+Or, you may need to edit the `docker.service` file, which can be in
+`/usr/lib/systemd/system`, `/etc/systemd/service`, or `/lib/systemd/system`.
### Runtime directory and storage driver
diff --git a/docs/sources/docker-hub-enterprise/adminguide.md b/docs/sources/docker-hub-enterprise/adminguide.md
index d471041675..66f099df44 100644
--- a/docs/sources/docker-hub-enterprise/adminguide.md
+++ b/docs/sources/docker-hub-enterprise/adminguide.md
@@ -53,7 +53,7 @@ following information:
* Error logs
* Crash logs
-## Emergency access to the DHE admin web interface
+## Emergency access to DHE
If your authenticated or public access to the DHE web interface has stopped
working, but your DHE admin container is still running, you can add an
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-logs.png b/docs/sources/docker-hub-enterprise/assets/admin-logs.png
index 76f0d19a80..3221cc54da 100644
Binary files a/docs/sources/docker-hub-enterprise/assets/admin-logs.png and b/docs/sources/docker-hub-enterprise/assets/admin-logs.png differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-metrics.png b/docs/sources/docker-hub-enterprise/assets/admin-metrics.png
index ccec72a31a..965101fddb 100644
Binary files a/docs/sources/docker-hub-enterprise/assets/admin-metrics.png and b/docs/sources/docker-hub-enterprise/assets/admin-metrics.png differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-basic.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-basic.png
index ef9dfe3513..530a160db0 100644
Binary files a/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-basic.png and b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-basic.png differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-ldap.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-ldap.png
index 112a15c361..02715d3e77 100644
Binary files a/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-ldap.png and b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-ldap.png differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication.png
index 83cba1287c..145102a698 100644
Binary files a/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication.png and b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication.png differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-http-unlicensed.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-http-unlicensed.png
index 27ce98b27d..149be8097c 100644
Binary files a/docs/sources/docker-hub-enterprise/assets/admin-settings-http-unlicensed.png and b/docs/sources/docker-hub-enterprise/assets/admin-settings-http-unlicensed.png differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-http.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-http.png
index d860c5088d..8c402bbccc 100644
Binary files a/docs/sources/docker-hub-enterprise/assets/admin-settings-http.png and b/docs/sources/docker-hub-enterprise/assets/admin-settings-http.png differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-license.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-license.png
index 488f212008..ab6082d9bb 100644
Binary files a/docs/sources/docker-hub-enterprise/assets/admin-settings-license.png and b/docs/sources/docker-hub-enterprise/assets/admin-settings-license.png differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-security.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-security.png
index 81d375040e..5d837dc697 100644
Binary files a/docs/sources/docker-hub-enterprise/assets/admin-settings-security.png and b/docs/sources/docker-hub-enterprise/assets/admin-settings-security.png differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-storage.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-storage.png
index 9aea039c68..a0359779d8 100644
Binary files a/docs/sources/docker-hub-enterprise/assets/admin-settings-storage.png and b/docs/sources/docker-hub-enterprise/assets/admin-settings-storage.png differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings.png b/docs/sources/docker-hub-enterprise/assets/admin-settings.png
index 699e722e93..a900828eb9 100644
Binary files a/docs/sources/docker-hub-enterprise/assets/admin-settings.png and b/docs/sources/docker-hub-enterprise/assets/admin-settings.png differ
diff --git a/docs/sources/docker-hub-enterprise/assets/console-pull.png b/docs/sources/docker-hub-enterprise/assets/console-pull.png
index 57f264f4ea..db93646b2e 100755
Binary files a/docs/sources/docker-hub-enterprise/assets/console-pull.png and b/docs/sources/docker-hub-enterprise/assets/console-pull.png differ
diff --git a/docs/sources/docker-hub-enterprise/assets/console-push.png b/docs/sources/docker-hub-enterprise/assets/console-push.png
index 25acdc18c3..e5c45ef716 100755
Binary files a/docs/sources/docker-hub-enterprise/assets/console-push.png and b/docs/sources/docker-hub-enterprise/assets/console-push.png differ
diff --git a/docs/sources/docker-hub-enterprise/assets/jenkins-plugins.png b/docs/sources/docker-hub-enterprise/assets/jenkins-plugins.png
index 667e98b55b..907146e65c 100755
Binary files a/docs/sources/docker-hub-enterprise/assets/jenkins-plugins.png and b/docs/sources/docker-hub-enterprise/assets/jenkins-plugins.png differ
diff --git a/docs/sources/docker-hub-enterprise/assets/jenkins-ui.png b/docs/sources/docker-hub-enterprise/assets/jenkins-ui.png
index 6c8bd5f722..be7b28d599 100755
Binary files a/docs/sources/docker-hub-enterprise/assets/jenkins-ui.png and b/docs/sources/docker-hub-enterprise/assets/jenkins-ui.png differ
diff --git a/docs/sources/docker-hub-enterprise/configuration.md b/docs/sources/docker-hub-enterprise/configuration.md
index 6050da401a..d537bc48eb 100644
--- a/docs/sources/docker-hub-enterprise/configuration.md
+++ b/docs/sources/docker-hub-enterprise/configuration.md
@@ -2,7 +2,9 @@ page_title: Docker Hub Enterprise: Configuration options
page_description: Configuration instructions for Docker Hub Enterprise
page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry
-# Configuration options
+# Configuring DHE
+
+## Overview
This page will help you properly configure Docker Hub Enterprise (DHE) so it can
run in your environment.
@@ -20,7 +22,7 @@ configuration options. You'll see options for configuring:

-* *Domain Name*: **required**; defaults to an empty string, the fully qualified domain name assigned to the DHE host.
+* *Domain Name*: **required** defaults to an empty string, the fully qualified domain name assigned to the DHE host.
* *Load Balancer HTTP Port*: defaults to 80, used as the entry point for the image storage service. To see load balancer status, you can query
http://<dhe-host>/load_balancer_status.
* *Load Balancer HTTPS Port*: defaults to 443, used as the secure entry point
@@ -267,42 +269,85 @@ by the [Registry 2.0](http://docs.docker.com/registry/configuration/).
## Authentication
+The "Authentication" settings tab lets DHE administrators control access
+to the DHE web admin tool and to the DHE Registry.
+
The current authentication methods are `None`, `Basic` and `LDAP`.
-The `Basic` setting includes:
+> **Note**: if you have issues logging into the DHE admin web interface after changing the authentication
+> settings, you may need to use the [emergency access to the DHE admin web interface](./adminguide.md#Emergency-access-to-the-dhe-admin-web-interface).
+
+### No authentication
+
+No authentication means that everyone can access your DHE web administration
+site. This is not recommended for any use other than testing.
+
+
+### Basic authentication
+
+The `Basic` authentication setting allows the admin to provide username/password pairs local to DHE.
+Any user who can successfully authenticate can use DHE to push and pull Docker images.
+You can optionally filter the list of users to a subset of just those users with access to the DHE
+admin web interface.

* A button to add one user, or to upload a CSV file containing username,
password pairs
* A DHE website Administrator Filter, allowing you to either
-* * 'Allow all authenticated users' to log into the DHE admin web interface, or
-* * 'Whitelist usernames', which allows you to restrict access to the web
-interface to the listed set of users.
+* * *Allow all authenticated users*: to log into the DHE admin web interface, or
+* * *Whitelist usernames*: which allows you to restrict access to the web interface to a listed set of users.
-The `LDAP` setting includes:
+### LDAP authentication
+
+Using LDAP authentication allows you to integrate your DHE registry into your
+organization's existing user and authentication database.
+
+As this involves existing infrastructure external to DHE and Docker, you will need to
+gather the details required to configure DHE for your organization's particular LDAP
+implementation.
+
+You can test that you have the necessary LDAP server information by using it from
+inside a Docker container running on the same server as your DHE:
+
+> **Note**: if the LDAP server is configured to use *StartTLS*, then you need to add `-Z` to the
+> `ldapsearch` command examples below.
+
+```
+docker run --rm -it svendowideit/ldapsearch -h <ldap-server> -b <user-base-dn> -D <search-user-dn> -w <search-user-password>
+```
+
+or if the LDAP server is set up to allow anonymous access (which means your *Search User DN* and *Search User Password* settings can remain empty):
+
+```
+docker run --rm -it svendowideit/ldapsearch -h <ldap-server> -b <user-base-dn> -x
+```
+
+The result of these queries should be a (very) long list - if you get an authentication error,
+then the details you have been given are not sufficient.
+
+The *User Login Attribute* key setting must match the field used in the LDAP server
+for the user's login-name. On OpenLDAP, it's generally `uid`, and on Microsoft Active Directory
+servers, it's `sAMAccountName`. The `ldapsearch` output above should allow you to
+confirm which setting you need.

* *Use StartTLS*: defaults to unchecked, check to enable StartTLS
-* *LDAP Server URL*: **required**; defaults to null, LDAP server URL (e.g., - ldap://example.com)
-* *User Base DN*: **required**; defaults to null, user base DN in the form
-(e.g., - dc=example,dc=com)
-* *User Login Attribute*: **required**; defaults to null, user login attribute
-(e.g., - uid or sAMAccountName)
-* *Search User DN*:** required**; defaults to null, search user DN
-(e.g., - domain\username)
-* *Search User Password*: **required**; defaults to null, search user password
-* A *DHE Registry User filter*, allowing you to either
-* * 'Allow all authenticated users' to push or pull any images, or
-* * 'Filter LDAP search results', which allows you to restrict DHE registry pull
-and push to users matching the LDAP filter,
-* * 'Whitelist usernames', which allows you to restrict DHE registry pull and
-push to the listed set of users.
+* *LDAP Server URL*: **required** defaults to null, LDAP server URL (e.g., - ldap://example.com)
+* *User Base DN*: **required** defaults to null, user base DN in the form (e.g., - dc=example,dc=com)
+* *User Login Attribute*: **required** defaults to null, user login attribute (e.g., - uid or sAMAccountName)
+* *Search User DN*: **required** defaults to null, search user DN (e.g., - domain\username)
+* *Search User Password*: **required** defaults to null, search user password
+* A *DHE Registry User filter*, allowing you to either
+* * *Allow all authenticated users* to push or pull any images, or
+* * *Filter LDAP search results*: which allows you to restrict DHE registry pull and push to users matching the LDAP filter,
+* * *Whitelist usernames*: which allows you to restrict DHE registry pull and push to the listed set of users.
* A *DHE website Administrator filter*, allowing you to either
-* * 'Allow all authenticated users' to log into the DHE admin web interface, or
-* * 'Filter LDAP search results', which allows you to restrict DHE admin web access to users matching the LDAP filter,
-* * 'Whitelist usernames', which allows you to restrict access to the web interface to the listed set of users.
+* * *Allow all authenticated users*: to log into the DHE admin web interface, or
+* * *Filter LDAP search results*: which allows you to restrict DHE admin web access to users matching the LDAP filter,
+* * *Whitelist usernames*: which allows you to restrict access to the web interface to the listed set of users.
+
## Next Steps
diff --git a/docs/sources/docker-hub-enterprise/index.md b/docs/sources/docker-hub-enterprise/index.md
index c14bf9280f..d1875318f1 100644
--- a/docs/sources/docker-hub-enterprise/index.md
+++ b/docs/sources/docker-hub-enterprise/index.md
@@ -2,7 +2,9 @@ page_title: Docker Hub Enterprise: Overview
page_description: Docker Hub Enterprise
page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry
-# Overview
+# Welcome to Docker Hub Enterprise
+
+## Overview
Docker Hub Enterprise (DHE) lets you run and manage your own Docker image
storage service, securely on your own infrastructure behind your company
@@ -29,7 +31,7 @@ DHE is perfect for:
DHE is built on [version 2 of the Docker registry](https://github.com/docker/distribution).
-## Documentation
+## Available Documentation
The following documentation for DHE is available:
diff --git a/docs/sources/docker-hub-enterprise/install.md b/docs/sources/docker-hub-enterprise/install.md
index 84f9a321b3..1bfa7d132d 100644
--- a/docs/sources/docker-hub-enterprise/install.md
+++ b/docs/sources/docker-hub-enterprise/install.md
@@ -2,7 +2,7 @@ page_title: Docker Hub Enterprise: Install
page_description: Installation instructions for Docker Hub Enterprise
page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry
-# Install
+# Installing Docker Hub Enterprise
## Overview
@@ -33,9 +33,9 @@ copy of DHE.
## Prerequisites
-DHE requires the following:
+DHE 1.0.1 requires the following:
-* Commercially supported Docker Engine 1.6.0 or later running on an
+* Commercially supported Docker Engine 1.6.1 or later running on an
Ubuntu 14.04 LTS, RHEL 7.1 or RHEL 7.0 host. (See below for instructions on how
to install the commercially supported Docker Engine.)
@@ -112,6 +112,7 @@ $ chmod 755 docker-cs-engine-deb.sh
$ sudo ./docker-cs-engine-deb.sh
$ sudo apt-get install docker-engine-cs
```
+Lastly, confirm Docker is running with `sudo service docker start`.
In order to simplify using Docker, you can get non-sudo access to the Docker
socket by adding your user to the `docker` group, then logging out and back in
@@ -124,6 +125,35 @@ $ exit
> **Note**: you may need to reboot your server to update its LTS kernel.
+## Upgrading the Commercially Supported Docker Engine
+
+CS Docker Engine 1.6.1 contains fixes to security vulnerabilities,
+and customers should upgrade to it immediately.
+
+> **Note**: If you have CS Docker Engine 1.6.0 installed, it must be upgraded;
+> however, due to compatibility issues, [DHE must be upgraded](#upgrading-docker-hub-enterprise)
+> first.
+
+The CS Docker Engine installation script set up the RHEL/Ubuntu package repositories,
+so upgrading the Engine only requires you to run the update commands on your server.
+
+### RHEL 7.0/7.1 upgrade
+
+To upgrade CS Docker Engine, run the following commands:
+
+```
+ $ sudo yum update
+ $ sudo systemctl daemon-reload && sudo systemctl restart docker
+```
+
+### Ubuntu 14.04 LTS upgrade
+
+To upgrade CS Docker Engine, run the following command:
+
+```
+ $ sudo apt-get update && sudo apt-get dist-upgrade docker-engine-cs
+```
+
## Installing Docker Hub Enterprise
Once the commercially supported Docker Engine is installed, you can install DHE
@@ -132,7 +162,6 @@ and the [Docker Hub](https://registry.hub.docker.com/). It is able to restart
and reconfigure itself using the Docker socket that is bind-mounted to its
container.
-
Start installing DHE by running the "dockerhubenterprise/manager" container:
```
@@ -279,7 +308,7 @@ based authentication.
See [DHE Authentication settings](./configuration.md#authentication) for more
details.
-# Upgrading
+## Upgrading Docker Hub Enterprise
DHE has been designed to allow on-the-fly software upgrades. Start by
clicking on the "System Health" tab. In the upper, right-hand side of the
@@ -305,6 +334,13 @@ DHE.
Assuming you have a decent internet connection, the entire upgrade process
should complete within a few minutes.
+You should now [upgrade CS Docker Engine](#upgrading-the-commercially-supported-docker-engine).
+
+> **Note**: If Docker engine is upgraded first (DHE 1.0.0 on CS Docker Engine 1.6.1),
+> DHE can still be upgraded from the command line by running:
+>
+> `sudo bash -c "$(sudo docker run dockerhubenterprise/manager:1.0.0 upgrade 1.0.1)"`
+
## Next Steps
For information on configuring DHE for your environment, take a look at the
diff --git a/docs/sources/docker-hub-enterprise/quick-start.md b/docs/sources/docker-hub-enterprise/quick-start.md
index a813deb076..c9353639ad 100644
--- a/docs/sources/docker-hub-enterprise/quick-start.md
+++ b/docs/sources/docker-hub-enterprise/quick-start.md
@@ -46,7 +46,9 @@ You should be able to complete this guide in about thirty minutes.
> fundamentals, please consult the
> [Docker user guide](http://docs.docker.com/userguide/).
-First, you will retrieve a copy of the official Jenkins image from the Docker Hub. From the CLI of a machine running the Docker Engine on your network, use
+First, you will retrieve a copy of the official Jenkins image from the Docker Hub. By default, if
+Docker can't find an image locally, it will attempt to pull the image from the
+Docker Hub. From the CLI of a machine running the Docker Engine on your network, use
the
[`docker pull`](https://docs.docker.com/reference/commandline/cli/#pull)
command to pull the public Jenkins image.
@@ -57,7 +59,7 @@ command to pull the public Jenkins image.
> you are a member of the `docker` group, or have root privileges. Otherwise, you may
> need to add `sudo` to the example commands below.
-Docker will start the process of pulling the image from the Hub. Once it has completed, the Jenkins image should be visible in the output of a [`docker images`](https://docs.docker.com/reference/commandline/cli/#images) command:
+Docker will start the process of pulling the image from the Hub. Once it has completed, the Jenkins image should be visible in the output of a [`docker images`](https://docs.docker.com/reference/commandline/cli/#images) command, which lists your available images:
$ docker images
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
@@ -192,7 +194,27 @@ image pulled earlier:
## Pushing to Docker Hub Enterprise
-Now that you’ve create the custom image, it can be pushed to DHE using the
+> **Note**: If your DHE instance has authentication enabled, you will need to
+> use your command line to `docker login` (e.g., `docker login
+> dhe.yourdomain.com`).
+>
+> Failures due to unauthenticated `docker push` and `docker pull` commands will
+> look like:
+>
+> $ docker pull dhe.yourdomain.com/hello-world
+> Pulling repository dhe.yourdomain.com/hello-world
+> FATA[0001] Error: image hello-world:latest not found
+>
+> $ docker push dhe.yourdomain.com/hello-world
+> The push refers to a repository [dhe.yourdomain.com/hello-world] (len: 1)
+> e45a5af57b00: Image push failed
+> FATA[0001] Error pushing to registry: token auth attempt for registry
+> https://dhe.yourdomain.com/v2/:
+> https://dhe.yourdomain.com/auth/v2/token/
+> ?scope=repository%3Ahello-world%3Apull%2Cpush&service=dhe.yourdomain.com
+> request failed with status: 401 Unauthorized
+
+Now that you’ve created the custom image, it can be pushed to DHE using the
[`docker push`command](https://docs.docker.com/reference/commandline/cli/#push):
$ docker push dhe.yourdomain.com/ci-infrastructure/jnkns-img
diff --git a/docs/sources/docker-hub-enterprise/release-notes.md b/docs/sources/docker-hub-enterprise/release-notes.md
new file mode 100644
index 0000000000..f445e2ded2
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/release-notes.md
@@ -0,0 +1,240 @@
+page_title: Docker Hub Enterprise: Release notes
+page_description: Release notes for Docker Hub Enterprise
+page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry, release
+
+# Release Notes
+
+## Docker Hub Enterprise
+
+### DHE 1.0.1
+(11 May 2015)
+
+- Addresses compatibility issue with 1.6.1 CS Docker Engine
+
+### DHE 1.0.0
+(23 Apr 2015)
+
+- First release
+
+## Commercially Supported Docker Engine
+
+### CS Docker Engine 1.6.2-cs5
+(21 May 2015)
+
+For customers running Docker Engine on [supported versions of RedHat Enterprise
+Linux](https://www.docker.com/enterprise/support/) with [SELinux
+enabled](https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/
+6/html/Security-Enhanced_Linux/sect-Security-Enhanced_Linux-Working_with_SELinux
+-Enabling_and_Disabling_SELinux.html), the `docker build` and `docker run`
+commands will not have DNS host name resolution and bind-mounted volumes may
+not be accessible.
+As a result, customers with SELinux will be unable to use hostname-based network
+access in either `docker build` or `docker run`, nor will they be able to
+`docker run` containers
+that use `--volume` or `-v` bind-mounts (with an incorrect SELinux label) in
+their environment. By installing Docker
+Engine 1.6.2-cs5, customers can use Docker as intended on RHEL with SELinux enabled.
+
+For example, you will see failures like:
+
+```
+[root@dhe ~]# docker -v
+Docker version 1.6.0-cs2, build b8dd430
+[root@dhe ~]# ping dhe.home.org.au
+PING dhe.home.org.au (10.10.10.104) 56(84) bytes of data.
+64 bytes from dhe.home.gateway (10.10.10.104): icmp_seq=1 ttl=64 time=0.663 ms
+^C
+--- dhe.home.org.au ping statistics ---
+2 packets transmitted, 2 received, 0% packet loss, time 1001ms
+rtt min/avg/max/mdev = 0.078/0.370/0.663/0.293 ms
+[root@dhe ~]# docker run --rm -it debian ping dhe.home.org.au
+ping: unknown host
+[root@dhe ~]# docker run --rm -it debian cat /etc/resolv.conf
+cat: /etc/resolv.conf: Permission denied
+[root@dhe ~]# docker run --rm -it debian apt-get update
+Err http://httpredir.debian.org jessie InRelease
+
+Err http://security.debian.org jessie/updates InRelease
+
+Err http://httpredir.debian.org jessie-updates InRelease
+
+Err http://security.debian.org jessie/updates Release.gpg
+ Could not resolve 'security.debian.org'
+Err http://httpredir.debian.org jessie Release.gpg
+ Could not resolve 'httpredir.debian.org'
+Err http://httpredir.debian.org jessie-updates Release.gpg
+ Could not resolve 'httpredir.debian.org'
+[output truncated]
+
+```
+
+or when running a `docker build`:
+
+```
+[root@dhe ~]# docker build .
+Sending build context to Docker daemon 11.26 kB
+Sending build context to Docker daemon
+Step 0 : FROM fedora
+ ---> e26efd418c48
+Step 1 : RUN yum install httpd
+ ---> Running in cf274900ea35
+
+One of the configured repositories failed (Fedora 21 - x86_64),
+and yum doesn't have enough cached data to continue. At this point the only
+safe thing yum can do is fail. There are a few ways to work "fix" this:
+
+[output truncated]
+```
+
+
+**Affected Versions**: All previous versions of Docker Engine when SELinux
+is enabled.
+
+Docker **highly recommends** that all customers running previous versions of
+Docker Engine update to this release.
+
+#### **How to work around this issue**
+
+Customers who choose not to install this update have two options. The
+first option is to disable SELinux. This is *not recommended* for production
+systems where SELinux is typically required.
+
+The second option is to pass the following parameter to `docker run`.
+
+ --security-opt=label:type:docker_t
+
+This parameter cannot be passed to the `docker build` command.
+
+#### **Upgrade notes**
+
+When upgrading, make sure you stop DHE first, perform the Engine upgrade, and
+then restart DHE.
+
+If you are running with SELinux enabled, previous Docker Engine releases allowed
+you to bind-mount additional volumes or files inside the container as follows:
+
+ $ docker run -it -v /home/user/foo.txt:/foobar.txt:ro
+
+In the 1.6.2-cs5 release, you must ensure additional bind-mounts have the correct
+SELinux context. For example, if you want to mount `foobar.txt` as read-only
+into the container, do the following to create and test your bind-mount:
+
+1. Add the `z` option to the bind mount when you specify `docker run`.
+
+ $ docker run -it -v /home/user/foo.txt:/foobar.txt:ro,z
+
+2. Exec into your new container.
+
+ For example, if your container is `bashful_curie`, open a shell on the
+ container:
+
+ $ docker exec -it bashful_curie bash
+
+3. Use `cat` to check the permissions on the mounted file.
+
+ $ cat /foobar.txt
+ the contents of foobar appear
+
+ If you see the file's contents, your mount succeeded. If you receive a
+ `Permission denied` message and/or the `/var/log/audit/audit.log` file on
+ your Docker host contains an AVC Denial message, the mount did not succeed.
+
+ type=AVC msg=audit(1432145409.197:7570): avc: denied { read } for pid=21167 comm="cat" name="foobar.txt" dev="xvda2" ino=17704136 scontext=system_u:system_r:svirt_lxc_net_t:s0:c909,c965 tcontext=unconfined_u:object_r:user_home_t:s0 tclass=file
+
+ Recheck your command line to make sure you passed in the `z` option.
+
+
+### CS Docker Engine 1.6.2-cs4
+(13 May 2015)
+
+Fix mount regression for `/sys`.
+
+### CS Docker Engine 1.6.1-cs3
+(11 May 2015)
+
+Docker Engine version 1.6.1 has been released to address several vulnerabilities
+and is immediately available for all supported platforms. Users are advised to
+upgrade existing installations of the Docker Engine and use 1.6.1 for new installations.
+
+It should be noted that each of the vulnerabilities allowing privilege escalation
+may only be exploited by a malicious Dockerfile or image. Users are advised to
+run their own images and/or images built by trusted parties, such as those in
+the official images library.
+
+Please send any questions to security@docker.com.
+
+
+#### **[CVE-2015-3629](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3629) Symlink traversal on container respawn allows local privilege escalation**
+
+Libcontainer version 1.6.0 introduced changes which facilitated a mount namespace
+breakout upon respawn of a container. This allowed malicious images to write
+files to the host system and escape containerization.
+
+Libcontainer and Docker Engine 1.6.1 have been released to address this
+vulnerability. Users running untrusted images are encouraged to upgrade Docker Engine.
+
+Discovered by Tõnis Tiigi.
+
+
+#### **[CVE-2015-3627](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3627) Insecure opening of file-descriptor 1 leading to privilege escalation**
+
+The file-descriptor passed by libcontainer to the pid-1 process of a container
+has been found to be opened prior to performing the chroot, allowing insecure
+open and symlink traversal. This allows malicious container images to trigger
+a local privilege escalation.
+
+Libcontainer and Docker Engine 1.6.1 have been released to address this
+vulnerability. Users running untrusted images are encouraged to upgrade
+Docker Engine.
+
+Discovered by Tõnis Tiigi.
+
+#### **[CVE-2015-3630](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3630) Read/write proc paths allow host modification & information disclosure**
+
+Several paths underneath /proc were writable from containers, allowing global
+system manipulation and configuration. These paths included `/proc/asound`,
+`/proc/timer_stats`, `/proc/latency_stats`, and `/proc/fs`.
+
+By allowing writes to `/proc/fs`, it has been noted that CIFS volumes could be
+forced into a protocol downgrade attack by a root user operating inside of a
+container. Machines having loaded the timer_stats module were vulnerable to
+having this mechanism enabled and consumed by a container.
+
+We are releasing Docker Engine 1.6.1 to address this vulnerability. All
+versions up to 1.6.1 are believed vulnerable. Users running untrusted
+images are encouraged to upgrade.
+
+Discovered by Eric Windisch of the Docker Security Team.
+
+#### **[CVE-2015-3631](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3631) Volume mounts allow LSM profile escalation**
+
+By allowing volumes to override files of `/proc` within a mount namespace, a user
+could specify arbitrary policies for Linux Security Modules, including setting
+an unconfined policy underneath AppArmor, or a `docker_t` policy for processes
+managed by SELinux. In all versions of Docker up until 1.6.1, it is possible for
+malicious images to configure volume mounts such that files of proc may be overridden.
+
+We are releasing Docker Engine 1.6.1 to address this vulnerability. All versions
+up to 1.6.1 are believed vulnerable. Users running untrusted images are encouraged
+to upgrade.
+
+Discovered by Eric Windisch of the Docker Security Team.
+
+#### **AppArmor policy improvements**
+
+The 1.6.1 release also marks preventative additions to the AppArmor policy.
+Recently, several CVEs against the kernel have been reported whereby mount
+namespaces could be circumvented through the use of the sys_mount syscall from
+inside of an unprivileged Docker container. In all reported cases, the
+AppArmor policy included in libcontainer and shipped with Docker has been
+sufficient to deflect these attacks. However, we have deemed it prudent to
+proactively tighten the policy further by outright denying the use of the
+`sys_mount` syscall.
+
+Because this addition is preventative, no CVE-ID is requested.
+
+### CS Docker Engine 1.6.0-cs2
+(23 Apr 2015)
+
+- First release, please see the [Docker Engine 1.6.0 Release notes](/release-notes/)
+ for more details.
diff --git a/docs/sources/docker-hub-enterprise/support.md b/docs/sources/docker-hub-enterprise/support.md
index ed60748a3a..1d58f8e785 100644
--- a/docs/sources/docker-hub-enterprise/support.md
+++ b/docs/sources/docker-hub-enterprise/support.md
@@ -2,7 +2,9 @@ page_title: Docker Hub Enterprise: Support
page_description: Commercial Support
page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry, support
-# Commercial Support
+# Commercial Support Options
+
+## How to get support
Purchasing a DHE License or Commercial Support subscription means your questions
and issues about DHE will receive prioritized support.
diff --git a/docs/sources/docker-hub-enterprise/userguide.md b/docs/sources/docker-hub-enterprise/userguide.md
index 6d329722de..80908a8171 100644
--- a/docs/sources/docker-hub-enterprise/userguide.md
+++ b/docs/sources/docker-hub-enterprise/userguide.md
@@ -10,10 +10,21 @@ need to know about, such as pushing or pulling images, etc. For tasks DHE
administrators need to accomplish, such as configuring or monitoring DHE, please
visit the [Administrator's Guide](./adminguide.md).
-## Using DHE to push and pull images
+## Overview
The primary use case for DHE users is to push and pull images to and from the
-DHE image storage service. The following instructions describe these procedures.
+DHE image storage service. For example, you might pull an Official Image for
+Ubuntu from the Docker Hub, customize it with configuration settings for your
+infrastructure and then push it to your DHE image storage for other developers
+to pull and use for their development environments.
+
+Pushing and pulling images with DHE works very much like any other Docker
+registry: you use the `docker pull` command to retrieve images and the `docker
+push` command to add an image to a DHE repository. To learn more about Docker
+images, see
+[User Guide: Working with Docker Images](https://docs.docker.com/userguide/dockerimages/). For a step-by-step
+example of the entire process, see the
+[Quick Start: Basic Workflow Guide](./quick-start.md).
> **Note**: If your DHE instance has authentication enabled, you will need to
>use your command line to `docker login ` (e.g., `docker login
@@ -29,97 +40,81 @@ DHE image storage service. The following instructions describe these procedures.
> $ docker push dhe.yourdomain.com/hello-world
> The push refers to a repository [dhe.yourdomain.com/hello-world] (len: 1)
> e45a5af57b00: Image push failed
-> FATA[0001] Error pushing to registry: token auth attempt for registry https://dhe.yourdomain.com/v2/: https://> dhe.yourdomain.com/auth/v2/token/?scope=repository%3Ahello-world%3Apull%2Cpush&service=dhe.yourdomain.com > request failed with status: 401 Unauthorized
+> FATA[0001] Error pushing to registry: token auth attempt for registry
+> https://dhe.yourdomain.com/v2/:
+> https://dhe.yourdomain.com/auth/v2/token/?scope=
+> repository%3Ahello-world%3Apull%2Cpush&service=dhe.yourdomain.com
+> request failed with status: 401 Unauthorized
+## Pushing Images
-1. Pull the `hello-world` official image from the Docker Hub. By default, if
-Docker can't find an image locally, it will attempt to pull the image from the
-Docker Hub.
+You push an image up to a DHE repository by using the
+[`docker push` command](https://docs.docker.com/reference/commandline/cli/#push).
- `$ docker pull hello-world`
+You can add a `tag` to your image so that you can more easily identify it
+amongst other variants and so that it refers to your DHE server.
-2. List your available images.
+ `$ docker tag hello-world:latest dhe.yourdomain.com/yourusername/hello-mine:latest`
- $ docker images
- REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
- hello-world latest e45a5af57b00 3 months ago 910 B
+The command labels a `hello-world:latest` image using a new tag in the
+`[REGISTRYHOST/][USERNAME/]NAME[:TAG]` format. The `REGISTRYHOST` in this
+case is your DHE server, `dhe.yourdomain.com`, and the `USERNAME` is
+`yourusername`. Lastly, the image tag is set to `hello-mine:latest`.
- Your list should include the `hello-world` image from the earlier run.
-
-3. Re-tag the `hello-world` image so that it refers to your DHE server.
-
- `$ docker tag hello-world:latest dhe.yourdomain.com/demouser/hello-mine:latest`
-
- The command labels a `hello-world:latest` image using a new tag in the
- `[REGISTRYHOST/][USERNAME/]NAME[:TAG]` format. The `REGISTRYHOST` in this
- case is the DHE server, `dhe.yourdomain.com`, and the `USERNAME` is
- `demouser`.
-
-4. List your new image.
-
- $ docker images
- REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
- hello-world latest e45a5af57b00 3 months ago 910 B
- dhe.yourdomain.com/demouser/hello-mine latest e45a5af57b00 3 months ago 910 B
-
- You should see your new image label in the listing, with the same `IMAGE ID`
- as the Official image.
-
-5. Push this new image to your DHE server.
+Once an image is tagged, you can push it to DHE with:
`$ docker push dhe.yourdomain.com/demouser/hello-mine:latest`
+
+> **Note**: If the Docker daemon on which you are running `docker push` doesn't
+> have the right certificates set up, you will get an error similar to:
+>
+> $ docker push dhe.yourdomain.com/demouser/hello-world
+> FATA[0000] Error response from daemon: v1 ping attempt failed with error:
+> Get https://dhe.yourdomain.com/v1/_ping: x509: certificate signed by
+> unknown authority. If this private registry supports only HTTP or HTTPS
+> with an unknown CA certificate, please add `--insecure-registry
+> dhe.yourdomain.com` to the daemon's arguments. In the case of HTTPS, if
+> you have access to the registry's CA certificate, no need for the flag;
+> simply place the CA certificate at
+> /etc/docker/certs.d/dhe.yourdomain.com/ca.crt
-6. Set up a test of DHE by removing all images from your local environment:
+## Pulling images
- `$ docker rmi -f $(docker images -q -a)`
+You can retrieve an image with the
+[`docker pull` command](https://docs.docker.com/reference/commandline/cli/#run),
+or you can retrieve an image and run Docker to build the container with the
+[`docker run`command](https://docs.docker.com/reference/commandline/cli/#run).
- This command is for illustrative purposes only: removing the image forces
- any subsequent `run` to pull from a remote registry (such as DHE) rather
- than from a local cache. If you run `docker images` after this you should
- not see any instance of `hello-world` or `hello-mine` in your images list.
+To retrieve an image from DHE and then run Docker to build the container, add
+the needed info to `docker run`:
- $ docker images
- REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
-
-7. Try running `hello-mine`.
-
- $ docker run hello-mine
- Unable to find image 'hello-mine:latest' locally
- Pulling repository hello-mine
- FATA[0007] Error: image library/hello-mine:latest not found
-
- The `run` command fails because your new image doesn't exist on the Docker Hub.
-
-8. Run `hello-mine` again, this time pointing it to pull from DHE:
-
- $ docker run dhe.yourdomain.com/demouser/hello-mine
- latest: Pulling from dhe.yourdomain.com/demouser/hello-mine
+ $ docker run dhe.yourdomain.com/yourusername/hello-mine
+ latest: Pulling from dhe.yourdomain.com/yourusername/hello-mine
511136ea3c5a: Pull complete
31cbccb51277: Pull complete
e45a5af57b00: Already exists
Digest: sha256:45f0de377f861694517a1440c74aa32eecc3295ea803261d62f950b1b757bed1
Status: Downloaded newer image for dhe.yourdomain.com/demouser/hello-mine:latest
- If you run `docker images` after this you'll see a `hello-mine` image.
+Note that if you don't specify a version, by default the `latest` version of an
+image will be pulled.
+
+If you run `docker images` after this you'll see a `hello-mine` image.
$ docker images
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
- dhe.yourdomain.com/demouser/hello-mine latest e45a5af57b00 3 months ago 910 B
+ dhe.yourdomain.com/yourusername/hello-mine latest e45a5af57b00 3 months ago 910 B
-> **Note**: If the Docker daemon on which you are running `docker push` doesn't
-> have the right certificates set up, you will get an error similar to:
->
-> $ docker push dhe.yourdomain.com/demouser/hello-world
-> FATA[0000] Error response from daemon: v1 ping attempt failed with error: Get https://dhe.yourdomain.com/v1/_ping: x509: certificate signed by unknown authority. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry dhe.yourdomain.com` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/dhe.yourdomain.com/ca.crt
+To pull an image without building the container, use `docker pull` and specify
+your DHE registry by adding it to the command:
+
+ $ docker pull dhe.yourdomain.com/yourusername/hello-mine
-9. You have now successfully created a custom image, `hello-mine`, tagged it,
- and pushed it to the image storage provided by your DHE instance. You then
- pulled that image back down from DHE and onto your machine, where you can
- use it to create a container containing the "Hello World" application..
## Next Steps
-For information on administering DHE, take a look at the [Administrator's Guide](./adminguide.md).
+For information on administering DHE, take a look at the
+[Administrator's Guide](./adminguide.md).
+<!-- * explain the fully-qualified repo/image name
+* explain how to remove an image from DHE -->
diff --git a/docs/sources/docker-hub/accounts.md b/docs/sources/docker-hub/accounts.md
index 360eb371f3..510111f8ef 100644
--- a/docs/sources/docker-hub/accounts.md
+++ b/docs/sources/docker-hub/accounts.md
@@ -34,21 +34,50 @@ page.
## Organizations and groups
-Also available on the Docker Hub are organizations and groups that allow
-you to collaborate across your organization or team. You can see what
-organizations [you belong to and add new organizations](
+A Docker Hub organization contains public and private repositories just like
+a user account. Access to push, pull or create these organization-owned repositories
+is allocated by defining groups of users and then assigning group rights to
+specific repositories. This allows you to distribute limited access
+Docker images, and to select which Docker Hub users can publish new images.
+
+### Creating and viewing organizations
+
+You can see what organizations [you belong to and add new organizations](
https://hub.docker.com/account/organizations/) from the Account Settings
-tab. They are also listed below your user name on your repositories page and in your account profile.
+tab. They are also listed below your user name on your repositories page
+and in your account profile.

-From within your organizations you can create groups that allow you to
-further manage who can interact with your repositories.
+### Organization groups
+
+Users in the `Owners` group of an organization can create and modify the
+membership of groups.
+
+Unless they are the organization's `Owner`, users can only see groups of which they
+are members.

-You can add or invite users to join groups by clicking on the organization and then clicking the edit button for the group to which you want to add members. Enter a user-name (for current Hub users) or email address (if they are not yet Hub users) for the person you want to invite. They will receive an email invitation to join the group.
+### Repository group permissions
-
+Use organization groups to manage who can interact with your repositories.
+
+You need to be a member of the organization's `Owners` group to create a new group,
+Hub repository or automated build. As an `Owner`, you then delegate the following
+repository access rights to groups:
+
+- `Read` access allows a user to view, search, and pull a private repository in the
+ same way as they can a public repository.
+- `Write` access users are able to push to non-automated repositories on the Docker
+ Hub.
+- `Admin` access allows the user to modify the repository's "Description", "Collaborators" rights,
+ "Mark as unlisted", "Public/Private" status and "Delete".
+
+> **Note**: A User who has not yet verified their email address will only have
+> `Read` access to the repository, regardless of the rights their group membership
+> gives them.
+
+
diff --git a/docs/sources/docker-hub/hub-images/dashboard.png b/docs/sources/docker-hub/hub-images/dashboard.png
new file mode 100644
index 0000000000..594c5d1457
Binary files /dev/null and b/docs/sources/docker-hub/hub-images/dashboard.png differ
diff --git a/docs/sources/docker-hub/hub-images/groups.png b/docs/sources/docker-hub/hub-images/groups.png
index 0c6430efab..23dbbfcff4 100644
Binary files a/docs/sources/docker-hub/hub-images/groups.png and b/docs/sources/docker-hub/hub-images/groups.png differ
diff --git a/docs/sources/docker-hub/hub-images/hub.png b/docs/sources/docker-hub/hub-images/hub.png
index 16840e0547..489f730f96 100644
Binary files a/docs/sources/docker-hub/hub-images/hub.png and b/docs/sources/docker-hub/hub-images/hub.png differ
diff --git a/docs/sources/docker-hub/hub-images/invite.png b/docs/sources/docker-hub/hub-images/invite.png
index 0a157ffbf0..f663340443 100644
Binary files a/docs/sources/docker-hub/hub-images/invite.png and b/docs/sources/docker-hub/hub-images/invite.png differ
diff --git a/docs/sources/docker-hub/hub-images/org-repo-collaborators.png b/docs/sources/docker-hub/hub-images/org-repo-collaborators.png
new file mode 100644
index 0000000000..fdb53f7b63
Binary files /dev/null and b/docs/sources/docker-hub/hub-images/org-repo-collaborators.png differ
diff --git a/docs/sources/docker-hub/hub-images/orgs.png b/docs/sources/docker-hub/hub-images/orgs.png
index 604ed95a09..6987cd3b4e 100644
Binary files a/docs/sources/docker-hub/hub-images/orgs.png and b/docs/sources/docker-hub/hub-images/orgs.png differ
diff --git a/docs/sources/docker-hub/hub-images/repos.png b/docs/sources/docker-hub/hub-images/repos.png
index f25bb3a48d..4e83d34053 100644
Binary files a/docs/sources/docker-hub/hub-images/repos.png and b/docs/sources/docker-hub/hub-images/repos.png differ
diff --git a/docs/sources/docker-hub/index.md b/docs/sources/docker-hub/index.md
index 3651497e2c..db6694d3d5 100644
--- a/docs/sources/docker-hub/index.md
+++ b/docs/sources/docker-hub/index.md
@@ -4,20 +4,29 @@ page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub
# Docker Hub
+The [Docker Hub](https://hub.docker.com) provides a cloud-based platform service
+for distributed applications, including container image distribution and change
+management, user and team collaboration, and lifecycle workflow automation.
+

-## [Accounts](accounts/)
+## [Finding and pulling images](./userguide.md)
-[Learn how to create](accounts/) a [Docker Hub](https://hub.docker.com)
+Find out how to [use the Docker Hub](./userguide.md) to find and pull Docker
+images to run or build upon.
+
+## [Accounts](./accounts.md)
+
+[Learn how to create](./accounts.md) a Docker Hub
account and manage your organizations and groups.
-## [Repositories](repos/)
+## [Your Repositories](./repos.md)
Find out how to share your Docker images in [Docker Hub
-repositories](repos/) and how to store and manage private images.
+repositories](./repos.md) and how to store and manage private images.
-## [Automated builds](builds/)
+## [Automated builds](./builds.md)
Learn how to automate your build and deploy pipeline with [Automated
-Builds](builds/)
+Builds](./builds.md)
diff --git a/docs/sources/docker-hub/official_repos.md b/docs/sources/docker-hub/official_repos.md
index eb73b4bc20..98c33c6436 100644
--- a/docs/sources/docker-hub/official_repos.md
+++ b/docs/sources/docker-hub/official_repos.md
@@ -5,8 +5,8 @@ page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub
# Official Repositories on Docker Hub
The Docker [Official Repositories](http://registry.hub.docker.com/official) are
-a curated set of Docker repositories that are promoted on Docker Hub and
-supported by Docker, Inc. They are designed to:
+a curated set of Docker repositories that are promoted on Docker Hub. They are
+designed to:
* Provide essential base OS repositories (for example,
[`ubuntu`](https://registry.hub.docker.com/_/ubuntu/),
diff --git a/docs/sources/docker-hub/repos.md b/docs/sources/docker-hub/repos.md
index a48040fb55..1c4176f481 100644
--- a/docs/sources/docker-hub/repos.md
+++ b/docs/sources/docker-hub/repos.md
@@ -1,42 +1,37 @@
-page_title: Repositories and images on Docker Hub
-page_description: Repositories and images on Docker Hub
+page_title: Your Repositories on Docker Hub
+page_description: Your Repositories on Docker Hub
page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, webhooks, docs, documentation
-# Repositories and images on Docker Hub
+# Your Hub repositories
+
+Docker Hub repositories make it possible for you to share images with co-workers,
+customers or the Docker community at large. If you're building your images internally,
+either on your own Docker daemon, or using your own continuous integration services,
+you can push them to a Docker Hub repository that you add to your Docker Hub user or
+organization account.
+
+Alternatively, if the source code for your Docker image is on GitHub or Bitbucket,
+you can use an "Automated build" repository, which is built by the Docker Hub
+services. See the [automated builds documentation](./builds.md) to read about
+the extra functionality provided by those services.

-## Searching for repositories and images
-
-You can `search` for all the publicly available repositories and images using
-Docker.
-
- $ docker search ubuntu
-
-This will show you a list of the currently available repositories on the
-Docker Hub which match the provided keyword.
-
-If a repository is private it won't be listed on the repository search
-results. To see repository statuses, you can look at your [profile
-page](https://hub.docker.com) on [Docker Hub](https://hub.docker.com).
-
-## Repositories
-
Your Docker Hub repositories have a number of useful features.
-### Stars
+## Stars
Your repositories can be starred and you can star repositories in
return. Stars are a way to show that you like a repository. They are
also an easy way of bookmarking your favorites.
-### Comments
+## Comments
You can interact with other members of the Docker community and maintainers by
leaving comments on repositories. If you find any comments that are not
appropriate, you can flag them for review.
-### Collaborators and their role
+## Collaborators and their role
A collaborator is someone you want to give access to a private
repository. Once designated, they can `push` and `pull` to your
@@ -48,24 +43,9 @@ private to public.
> A collaborator cannot add other collaborators. Only the owner of
> the repository has administrative access.
-You can also collaborate on Docker Hub with organizations and groups.
-You can read more about that [here](accounts/).
-
-## Official Repositories
-
-The Docker Hub contains a number of [Official
-Repositories](http://registry.hub.docker.com/official). These are
-certified repositories from vendors and contributors to Docker. They
-contain Docker images from vendors like Canonical, Oracle, and Red Hat
-that you can use to build applications and services.
-
-If you use Official Repositories you know you're using a supported,
-optimized and up-to-date image to power your applications.
-
-> **Note:**
-> If you would like to contribute an Official Repository for your
-> organization, see [Official Repositories on Docker
-> Hub](/docker-hub/official_repos) for more information.
+You can also assign more granular collaborator rights ("Read", "Write", or "Admin")
+on Docker Hub by using organizations and groups. For more information
+see the [accounts documentation](accounts/).
## Private repositories
@@ -100,8 +80,15 @@ Hub](https://registry.hub.docker.com/plans/) plan.
## Webhooks
-You can configure webhooks for your repositories on the Repository
-Settings page. A webhook is called only after a successful `push` is
+A webhook is an HTTP call-back triggered by a specific event.
+You can use a Hub repository webhook to notify people, services, and other
+applications after a new image is pushed to your repository (this also happens
+for Automated builds). For example, you can trigger an automated test or
+deployment to happen as soon as the image is available.
+
+To get started adding webhooks, go to the desired repository in the Hub,
+and click "Webhooks" under the "Settings" box.
+A webhook is called only after a successful `push` is
made. The webhook calls are HTTP POST requests with a JSON payload
similar to the example shown below.
@@ -137,13 +124,9 @@ similar to the example shown below.
}
```
-Webhooks allow you to notify people, services and other applications of
-new updates to your images and repositories. To get started adding webhooks,
-go to the desired repository in the Hub, and click "Webhooks" under the "Settings"
-box.
+
-> **Note:** For testing, you can try an HTTP request tool like
-> [requestb.in](http://requestb.in/).
+For testing, you can try an HTTP request tool like [requestb.in](http://requestb.in/).
> **Note**: The Docker Hub servers are currently in the IP range
> `162.242.195.64 - 162.242.195.127`, so you can restrict your webhooks to
@@ -161,7 +144,7 @@ in your chain.
The first webhook in a chain will be called after a successful push. Subsequent
URLs will be contacted after the callback has been validated.
-#### Validating a callback
+### Validating a callback
In order to validate a callback in a webhook chain, you need to
@@ -195,3 +178,10 @@ The following parameters are recognized in callback data:
"context": "Continuous integration by Acme CI",
"target_url": "http://ci.acme.com/results/afd339c1c3d27"
}
+
+## Mark as unlisted
+
+By marking a repository as unlisted, you can create a publicly pullable repository
+which will not be in the Hub or commandline search. This allows you to have a limited
+release, but does not restrict access to anyone that is told, or guesses the repository
+name.
diff --git a/docs/sources/docker-hub/userguide.md b/docs/sources/docker-hub/userguide.md
new file mode 100644
index 0000000000..7ace5f358b
--- /dev/null
+++ b/docs/sources/docker-hub/userguide.md
@@ -0,0 +1,57 @@
+page_title: Docker Hub user guide
+page_description: Docker Hub user guide
+page_keywords: Docker, docker, registry, Docker Hub, docs, documentation
+
+# Using the Docker Hub
+
+Docker Hub is used to find and pull Docker images to run or build upon, and to
+distribute and build images for other users to use.
+
+
+
+## Finding repositories and images
+
+There are two ways you can search for public repositories and images available
+on the Docker Hub. You can use the "Search" tool on the Docker Hub website, or
+you can `search` for all the repositories and images using the Docker commandline
+tool:
+
+ $ docker search ubuntu
+
+Both will show you a list of the currently available public repositories on the
+Docker Hub which match the provided keyword.
+
+If a repository is private or marked as unlisted, it won't be in the repository
+search results. To see all the repositories you have access to and their statuses,
+you can look at your profile page on [Docker Hub](https://hub.docker.com).
+
+## Pulling, running and building images
+
+You can find more information on [working with Docker images](../userguide/dockerimages.md).
+
+## Official Repositories
+
+The Docker Hub contains a number of [Official
+Repositories](http://registry.hub.docker.com/official). These are
+certified repositories from vendors and contributors to Docker. They
+contain Docker images from vendors like Canonical, Oracle, and Red Hat
+that you can use to build applications and services.
+
+If you use Official Repositories you know you're using an optimized and
+up-to-date image to power your applications.
+
+> **Note:**
+> If you would like to contribute an Official Repository for your
+> organization, see [Official Repositories on Docker
+> Hub](/docker-hub/official_repos) for more information.
+
+## Building and shipping your own repositories and images
+
+The Docker Hub provides you and your team with a place to build and ship Docker images.
+
+Collections of Docker images are managed using repositories.
+
+You can configure two types of repositories to manage on the Docker Hub:
+[Repositories](./repos.md), which allow you to push images to the Hub from your local Docker daemon,
+and [Automated Builds](./builds.md), which allow you to configure GitHub or Bitbucket to
+trigger the Hub to rebuild repositories when changes are made to the repository.
diff --git a/docs/sources/examples/mongodb.md b/docs/sources/examples/mongodb.md
index 376a8d3ffe..b7b423b747 100644
--- a/docs/sources/examples/mongodb.md
+++ b/docs/sources/examples/mongodb.md
@@ -10,6 +10,11 @@ In this example, we are going to learn how to build a Docker image with
MongoDB pre-installed. We'll also see how to `push` that image to the
[Docker Hub registry](https://hub.docker.com) and share it with others!
+> **Note:**
+>
+> This guide will show the mechanics of building a MongoDB container, but
+> you will probably want to use the official image on [Docker Hub]( https://registry.hub.docker.com/_/mongo/)
+
Using Docker and containers for deploying [MongoDB](https://www.mongodb.org/)
instances will bring several benefits, such as:
@@ -59,8 +64,8 @@ a MongoDB repository file for the package manager.
# Installation:
# Import MongoDB public GPG key AND create a MongoDB list file
- RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv 7F0CEB10
- RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list
+ RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
+ RUN echo "deb http://repo.mongodb.org/apt/ubuntu "$(lsb_release -sc)"/mongodb-org/3.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.0.list
After this initial preparation we can update our packages and install MongoDB.
@@ -70,7 +75,7 @@ After this initial preparation we can update our packages and install MongoDB.
> **Tip:** You can install a specific version of MongoDB by using a list
> of required packages with versions, e.g.:
>
-> RUN apt-get update && apt-get install -y mongodb-org=2.6.1 mongodb-org-server=2.6.1 mongodb-org-shell=2.6.1 mongodb-org-mongos=2.6.1 mongodb-org-tools=2.6.1
+> RUN apt-get update && apt-get install -y mongodb-org=3.0.1 mongodb-org-server=3.0.1 mongodb-org-shell=3.0.1 mongodb-org-mongos=3.0.1 mongodb-org-tools=3.0.1
MongoDB requires a data directory. Let's create it as the final step of our
installation instructions.
@@ -86,7 +91,7 @@ the `EXPOSE` instruction.
EXPOSE 27017
# Set usr/bin/mongod as the dockerized entry-point application
- ENTRYPOINT usr/bin/mongod
+ ENTRYPOINT ["/usr/bin/mongod"]
Now save the file and let's build our image.
@@ -133,11 +138,11 @@ as daemon process(es).
# Basic way
# Usage: docker run --name -d /
- $ docker run --name mongo_instance_001 -d my/repo
+ $ docker run -p 27017:27017 --name mongo_instance_001 -d my/repo
# Dockerized MongoDB, lean and mean!
# Usage: docker run --name -d / --noprealloc --smallfiles
- $ docker run --name mongo_instance_001 -d my/repo --noprealloc --smallfiles
+ $ docker run -p 27017:27017 --name mongo_instance_001 -d my/repo --noprealloc --smallfiles
# Checking out the logs of a MongoDB container
# Usage: docker logs
@@ -145,7 +150,23 @@ as daemon process(es).
# Playing with MongoDB
# Usage: mongo --port
- $ mongo --port 12345
+ $ mongo --port 27017
+
+ # If using boot2docker
+ # Usage: mongo --port --host
+ $ mongo --port 27017 --host 192.168.59.103
+
+> **Tip:**
+> If you want to run two containers on the same engine, then you will need to map
+> the exposed port to two different ports on the host.
+
+ # Start two containers and map the ports
+ $ docker run -p 28001:27017 --name mongo_instance_001 -d my/repo
+ $ docker run -p 28002:27017 --name mongo_instance_002 -d my/repo
+
+ # Now you can connect to each MongoDB instance on the two ports
+ $ mongo --port 28001
+ $ mongo --port 28002
- [Linking containers](/userguide/dockerlinks)
- [Cross-host linking containers](/articles/ambassador_pattern_linking/)
diff --git a/docs/sources/examples/mongodb/Dockerfile b/docs/sources/examples/mongodb/Dockerfile
index c17a6360e5..3513da4716 100644
--- a/docs/sources/examples/mongodb/Dockerfile
+++ b/docs/sources/examples/mongodb/Dockerfile
@@ -7,9 +7,8 @@ MAINTAINER Docker
# Installation:
# Import MongoDB public GPG key AND create a MongoDB list file
-RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv 7F0CEB10
-RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list
-
+RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
+RUN echo "deb http://repo.mongodb.org/apt/ubuntu "$(lsb_release -sc)"/mongodb-org/3.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.0.list
# Update apt-get sources AND install MongoDB
RUN apt-get update && apt-get install -y mongodb-org
diff --git a/docs/sources/examples/running_riak_service.md b/docs/sources/examples/running_riak_service.md
index 1b14c3a417..7450cd525d 100644
--- a/docs/sources/examples/running_riak_service.md
+++ b/docs/sources/examples/running_riak_service.md
@@ -56,8 +56,7 @@ After that, we modify Riak's configuration:
RUN sed -i "s|listener.http.internal = 127.0.0.1:8098|listener.http.internal = 0.0.0.0:8098|" /etc/riak/riak.conf
RUN sed -i "s|listener.protobuf.internal = 127.0.0.1:8087|listener.protobuf.internal = 0.0.0.0:8087|" /etc/riak/riak.conf
-Then, we expose the Riak Protocol Buffers and HTTP interfaces, along
-with SSH:
+Then, we expose the Riak Protocol Buffers and HTTP interfaces:
# Expose Riak Protocol Buffers and HTTP interfaces
EXPOSE 8087 8098
diff --git a/docs/sources/experimental/experimental.md b/docs/sources/experimental/experimental.md
new file mode 100644
index 0000000000..b0d72b90b0
--- /dev/null
+++ b/docs/sources/experimental/experimental.md
@@ -0,0 +1,51 @@
+page_title: Overview of Experimental Features
+page_keywords: experimental, Docker, feature
+
+# Experimental Features in this Release
+
+This page contains a list of features in the Docker engine which are
+experimental as of the current release. Experimental features are **not** ready
+for production. They are provided for test and evaluation in your sandbox
+environments.
+
+The information below describes each feature and the Github pull requests and
+issues associated with it. If necessary, links are provided to additional
+documentation on an issue. As an active Docker user and community member,
+please feel free to provide any feedback on these features you wish.
+
+## Install Docker experimental
+
+1. Verify that you have `wget` installed.
+
+ $ which wget
+
    If `wget` isn't installed, install it after updating your package manager:
+
+ $ sudo apt-get update
+ $ sudo apt-get install wget
+
+2. Get the latest Docker package.
+
+ $ wget -qO- https://experimental.docker.com/ | sh
+
+ The system prompts you for your `sudo` password. Then, it downloads and
+ installs Docker and its dependencies.
+
+ >**Note**: If your company is behind a filtering proxy, you may find that the
+ >`apt-key`
+ >command fails for the Docker repo during installation. To work around this,
+ >add the key directly using the following:
+ >
+ > $ wget -qO- https://experimental.docker.com/gpg | sudo apt-key add -
+
+3. Verify `docker` is installed correctly.
+
+ $ sudo docker run hello-world
+
+ This command downloads a test image and runs it in a container.
+
+## Experimental features in this Release
+
+* [Support for Docker plugins](plugins.md)
+* [Volume plugins](plugins_volume.md)
+
diff --git a/docs/sources/experimental/plugin_api.md b/docs/sources/experimental/plugin_api.md
new file mode 100644
index 0000000000..6454fc2a97
--- /dev/null
+++ b/docs/sources/experimental/plugin_api.md
@@ -0,0 +1,225 @@
+page_title: Plugin API documentation
+page_description: Documentation for writing a Docker plugin.
+page_keywords: docker, plugins, api, extensions
+
+# Experimental: Docker Plugin API
+
+Docker plugins are out-of-process extensions which add capabilities to the
+Docker Engine.
+
+This page is intended for people who want to develop their own Docker plugin.
+If you just want to learn about or use Docker plugins, look
+[here](/userguide/plugins).
+
+This is an experimental feature. For information on installing and using experimental features, see [the experimental feature overview](experimental.md).
+
+## What plugins are
+
+A plugin is a process running on the same docker host as the docker daemon,
+which registers itself by placing a file in `/usr/share/docker/plugins` (the
+"plugin directory").
+
+Plugins have human-readable names, which are short, lowercase strings. For
+example, `flocker` or `weave`.
+
+Plugins can run inside or outside containers. Currently running them outside
+containers is recommended.
+
+## Plugin discovery
+
+Docker discovers plugins by looking for them in the plugin directory whenever a
+user or container tries to use one by name.
+
+There are two types of files which can be put in the plugin directory.
+
+* `.sock` files are UNIX domain sockets.
+* `.spec` files are text files containing a URL, such as `unix:///other.sock`.
+
+The name of the file (excluding the extension) determines the plugin name.
+
+For example, the `flocker` plugin might create a UNIX socket at
+`/usr/share/docker/plugins/flocker.sock`.
+
+Plugins must be run locally on the same machine as the Docker daemon. UNIX
+domain sockets are strongly encouraged for security reasons.
+
+## Plugin lifecycle
+
+Plugins should be started before Docker, and stopped after Docker. For
+example, when packaging a plugin for a platform which supports `systemd`, you
+might use [`systemd` dependencies](
+http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to
+manage startup and shutdown order.
+
+When upgrading a plugin, you should first stop the Docker daemon, upgrade the
+plugin, then start Docker again.
+
+If a plugin is packaged as a container, this may cause issues. Plugins as
+containers are currently considered experimental due to these shutdown/startup
+ordering issues. These issues are mitigated by plugin retries (see below).
+
+## Plugin activation
+
+When a plugin is first referred to -- either by a user referring to it by name
+(e.g. `docker run --volume-driver=foo`) or a container already configured to
+use a plugin being started -- Docker looks for the named plugin in the plugin
+directory and activates it with a handshake. See Handshake API below.
+
+Plugins are *not* activated automatically at Docker daemon startup. Rather,
+they are activated only lazily, or on-demand, when they are needed.
+
+## API design
+
+The Plugin API is RPC-style JSON over HTTP, much like webhooks.
+
+Requests flow *from* the Docker daemon *to* the plugin. So the plugin needs to
+implement an HTTP server and bind this to the UNIX socket mentioned in the
+"plugin discovery" section.
+
+All requests are HTTP `POST` requests.
+
+The API is versioned via an Accept header, which currently is always set to
+`application/vnd.docker.plugins.v1+json`.
+
+## Handshake API
+
+Plugins are activated via the following "handshake" API call.
+
+### /Plugin.Activate
+
+**Request:** empty body
+
+**Response:**
+```
+{
+ "Implements": ["VolumeDriver"]
+}
+```
+
+Responds with a list of Docker subsystems which this plugin implements.
+After activation, the plugin will then be sent events from this subsystem.
+
+## Volume API
+
+If a plugin registers itself as a `VolumeDriver` (see above) then it is
+expected to provide writeable paths on the host filesystem for the Docker
+daemon to provide to containers to consume.
+
+The Docker daemon handles bind-mounting the provided paths into user
+containers.
+
+### /VolumeDriver.Create
+
+**Request**:
+```
+{
+ "Name": "volume_name"
+}
+```
+
+Instruct the plugin that the user wants to create a volume, given a user
+specified volume name. The plugin does not need to actually manifest the
+volume on the filesystem yet (until Mount is called).
+
+**Response**:
+```
+{
+ "Err": null
+}
+```
+
+Respond with a string error if an error occurred.
+
+### /VolumeDriver.Remove
+
+**Request**:
+```
+{
+ "Name": "volume_name"
+}
+```
+
+Delete a volume, given a user specified volume name.
+
+**Response**:
+```
+{
+ "Err": null
+}
+```
+
+Respond with a string error if an error occurred.
+
+### /VolumeDriver.Mount
+
+**Request**:
+```
+{
+ "Name": "volume_name"
+}
+```
+
+Docker requires the plugin to provide a volume, given a user specified volume
+name. This is called once per container start.
+
+**Response**:
+```
+{
+ "Mountpoint": "/path/to/directory/on/host",
+ "Err": null
+}
+```
+
+Respond with the path on the host filesystem where the volume has been made
+available, and/or a string error if an error occurred.
+
+### /VolumeDriver.Path
+
+**Request**:
+```
+{
+ "Name": "volume_name"
+}
+```
+
+Docker needs reminding of the path to the volume on the host.
+
+**Response**:
+```
+{
+ "Mountpoint": "/path/to/directory/on/host",
+ "Err": null
+}
+```
+
+Respond with the path on the host filesystem where the volume has been made
+available, and/or a string error if an error occurred.
+
+### /VolumeDriver.Unmount
+
+**Request**:
+```
+{
+ "Name": "volume_name"
+}
+```
+
+Indication that Docker no longer is using the named volume. This is called once
+per container stop. Plugin may deduce that it is safe to deprovision it at
+this point.
+
+**Response**:
+```
+{
+ "Err": null
+}
+```
+
+Respond with a string error if an error occurred.
+
+## Plugin retries
+
+Attempts to call a method on a plugin are retried with an exponential backoff
+for up to 30 seconds. This may help when packaging plugins as containers, since
+it gives plugin containers a chance to start up before failing any user
+containers which depend on them.
diff --git a/docs/sources/experimental/plugins.md b/docs/sources/experimental/plugins.md
new file mode 100644
index 0000000000..dbcb70ce67
--- /dev/null
+++ b/docs/sources/experimental/plugins.md
@@ -0,0 +1,48 @@
+page_title: Experimental feature - Plugins
+page_keywords: experimental, Docker, plugins
+
+# Experimental: Extend Docker with a plugin
+
+You can extend the capabilities of the Docker Engine by loading third-party
+plugins.
+
+This is an experimental feature. For information on installing and using experimental features, see [the experimental feature overview](experimental.md).
+
+## Types of plugins
+
+Plugins extend Docker's functionality. They come in specific types. For
+example, a [volume plugin](/experimental/plugins_volume) might enable Docker
+volumes to persist across multiple Docker hosts.
+
+Currently Docker supports volume plugins. In the future it will support
+additional plugin types.
+
+## Installing a plugin
+
+Follow the instructions in the plugin's documentation.
+
+## Finding a plugin
+
+The following plugins exist:
+
+* The [Flocker plugin](https://clusterhq.com/docker-plugin/) is a volume plugin
+ which provides multi-host portable volumes for Docker, enabling you to run
+ databases and other stateful containers and move them around across a cluster
+ of machines.
+
+## Troubleshooting a plugin
+
+If you are having problems with Docker after loading a plugin, ask the authors
+of the plugin for help. The Docker team may not be able to assist you.
+
+## Writing a plugin
+
+If you are interested in writing a plugin for Docker, or seeing how they work
+under the hood, see the [docker plugins reference](/experimental/plugin_api).
+
+# Related GitHub PRs and issues
+
+- [#13222](https://github.com/docker/docker/pull/13222) Plugins plumbing
+
+Send us feedback and comments on [#13419](https://github.com/docker/docker/issues/13419),
+or on the usual Google Groups (docker-user, docker-dev) and IRC channels.
diff --git a/docs/sources/experimental/plugins_volume.md b/docs/sources/experimental/plugins_volume.md
new file mode 100644
index 0000000000..399dda71e3
--- /dev/null
+++ b/docs/sources/experimental/plugins_volume.md
@@ -0,0 +1,45 @@
+page_title: Experimental feature - Volume plugins
+page_keywords: experimental, Docker, plugins, volume
+
+# Experimental: Docker volume plugins
+
+Docker volume plugins enable Docker deployments to be integrated with external
+storage systems, such as Amazon EBS, and enable data volumes to persist beyond
+the lifetime of a single Docker host. See the [plugin documentation](/experimental/plugins)
+for more information.
+
+This is an experimental feature. For information on installing and using experimental features, see [the experimental feature overview](experimental.md).
+
+# Command-line changes
+
+This experimental feature introduces two changes to the `docker run` command:
+
+- The `--volume-driver` flag is introduced.
+- The `-v` syntax is changed to accept a volume name as its first component.
+
+Example:
+
+ $ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh
+
+By specifying a volume name in conjunction with a volume driver, volume plugins
+such as [Flocker](https://clusterhq.com/docker-plugin/), once installed, can be
+used to manage volumes external to a single host, such as those on EBS. In this
+example, "volumename" is passed through to the volume plugin as a user-given
+name for the volume which allows the plugin to associate it with an external
+volume beyond the lifetime of a single container or container host. This can be
+used, for example, to move a stateful container from one server to another.
+
+The `volumename` must not begin with a `/`.
+
+# API changes
+
+The container creation endpoint (`/containers/create`) accepts a `VolumeDriver`
+field of type `string` allowing you to specify the name of the driver. Its default
+value is `"local"` (the default driver for local volumes).
+
+# Related GitHub PRs and issues
+
+- [#13161](https://github.com/docker/docker/pull/13161) Volume refactor and external volume plugins
+
+Send us feedback and comments on [#13420](https://github.com/docker/docker/issues/13420),
+or on the usual Google Groups (docker-user, docker-dev) and IRC channels.
diff --git a/docs/sources/installation/SUSE.md b/docs/sources/installation/SUSE.md
index 756ed6b5c1..106d4cbe31 100644
--- a/docs/sources/installation/SUSE.md
+++ b/docs/sources/installation/SUSE.md
@@ -28,7 +28,7 @@ Docker is available in **SUSE Linux Enterprise 12 and later**. Please note that
due to its current limitations Docker is able to run only on **64 bit**
architecture.
-# Installation
+## Installation
Install the Docker package.
@@ -76,6 +76,20 @@ If you need to add an HTTP Proxy, set a different directory or partition for the
Docker runtime files, or make other customizations, read our systemd article to
learn how to [customize your systemd Docker daemon options](/articles/systemd/).
+## Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo zypper rm docker
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
## What's next
Continue with the [User Guide](/userguide/).
diff --git a/docs/sources/installation/archlinux.md b/docs/sources/installation/archlinux.md
index 99849c7aa0..570e36c482 100644
--- a/docs/sources/installation/archlinux.md
+++ b/docs/sources/installation/archlinux.md
@@ -30,13 +30,13 @@ in the packages. The core dependencies are:
For the normal package a simple
- pacman -S docker
+ $ sudo pacman -S docker
is all that is needed.
For the AUR package execute:
- yaourt -S docker-git
+ $ sudo yaourt -S docker-git
The instructions here assume **yaourt** is installed. See [Arch User
Repository](https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages)
@@ -59,3 +59,21 @@ To start on system boot:
If you need to add an HTTP Proxy, set a different directory or partition for the
Docker runtime files, or make other customizations, read our systemd article to
learn how to [customize your systemd Docker daemon options](/articles/systemd/).
+
+## Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo pacman -R docker
+
+To uninstall the Docker package and dependencies that are no longer needed:
+
+ $ sudo pacman -Rns docker
+
+The above commands will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
diff --git a/docs/sources/installation/centos.md b/docs/sources/installation/centos.md
index 7868f11b05..efebad503c 100644
--- a/docs/sources/installation/centos.md
+++ b/docs/sources/installation/centos.md
@@ -25,7 +25,10 @@ To run Docker on [CentOS-6.5](http://www.centos.org) or later, you will need
kernel version 2.6.32-431 or higher as this has specific kernel fixes to allow
Docker to run.
-## Installing Docker - CentOS-7
+## CentOS-7
+
+### Installation
+
Docker is included by default in the CentOS-Extras repository. To install
run the following command:
@@ -33,7 +36,23 @@ run the following command:
Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon).
-## Installing Docker - CentOS-6.5
+### Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo yum -y remove docker
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
+## CentOS-6.5
+
+### Installation
For CentOS-6.5, the Docker package is part of [Extra Packages
for Enterprise Linux (EPEL)](https://fedoraproject.org/wiki/EPEL) repository,
@@ -57,6 +76,20 @@ Next, let's install the `docker-io` package which will install Docker on our hos
Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon).
+### Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo yum -y remove docker-io
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
## Manual installation of latest Docker release
While using a package is the recommended way of installing Docker,
diff --git a/docs/sources/installation/cruxlinux.md b/docs/sources/installation/cruxlinux.md
index d474aa52f8..e03715009f 100644
--- a/docs/sources/installation/cruxlinux.md
+++ b/docs/sources/installation/cruxlinux.md
@@ -15,9 +15,9 @@ The `docker` port will build and install the latest tagged version of Docker.
## Installation
-Assuming you have contrib enabled, update your ports tree and install docker (*as root*):
+Assuming you have contrib enabled, update your ports tree and install docker:
- # prt-get depinst docker
+ $ sudo prt-get depinst docker
## Kernel requirements
@@ -27,7 +27,7 @@ the necessary modules enabled for the Docker Daemon to function correctly.
Please read the `README`:
- $ prt-get readme docker
+ $ sudo prt-get readme docker
The `docker` port installs the `contrib/check-config.sh` script
provided by the Docker contributors for checking your kernel
@@ -39,9 +39,9 @@ To check your Kernel configuration run:
## Starting Docker
-There is a rc script created for Docker. To start the Docker service (*as root*):
+There is a rc script created for Docker. To start the Docker service:
- # /etc/rc.d/docker start
+ $ sudo /etc/rc.d/docker start
To start on system boot:
@@ -60,6 +60,20 @@ or use it as part of your `FROM` line in your `Dockerfile(s)`.
There are also user contributed [CRUX based image(s)](https://registry.hub.docker.com/repos/crux/) on the Docker Hub.
+## Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo prt-get remove docker
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
## Issues
If you have any issues please file a bug with the
diff --git a/docs/sources/installation/debian.md b/docs/sources/installation/debian.md
index da9e5f59b1..883f920cdd 100644
--- a/docs/sources/installation/debian.md
+++ b/docs/sources/installation/debian.md
@@ -37,6 +37,24 @@ container runs, it prints an informational message. Then, it exits.
> If you want to enable memory and swap accounting see
> [this](/installation/ubuntulinux/#memory-and-swap-accounting).
+### Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo apt-get purge docker-io
+
+To uninstall the Docker package and dependencies that are no longer needed:
+
+ $ sudo apt-get autoremove --purge docker-io
+
+The above commands will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
## Debian Wheezy/Stable 7.x (64-bit)
Docker requires Kernel 3.8+, while Wheezy ships with Kernel 3.2 (for more details
@@ -74,6 +92,24 @@ which is officially supported by Docker.
>
> $ wget -qO- https://get.docker.com/gpg | sudo apt-key add -
+### Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo apt-get purge lxc-docker
+
+To uninstall the Docker package and dependencies that are no longer needed:
+
+ $ sudo apt-get autoremove --purge lxc-docker
+
+The above commands will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
## Giving non-root access
The `docker` daemon always runs as the `root` user and the `docker`
diff --git a/docs/sources/installation/fedora.md b/docs/sources/installation/fedora.md
index ed4e8372a4..b3f23e4514 100644
--- a/docs/sources/installation/fedora.md
+++ b/docs/sources/installation/fedora.md
@@ -13,19 +13,37 @@ Currently the Fedora project will only support Docker when running on kernels
shipped by the distribution. There are kernel changes which will cause issues
if one decides to step outside that box and run non-distribution kernel packages.
-## Fedora 21 and later installation
+## Fedora 21 and later
-Install the `docker` package which will install Docker on our host.
+### Installation
+
+Install the Docker package which will install Docker on our host.
$ sudo yum -y install docker
-To update the `docker` package:
+To update the Docker package:
$ sudo yum -y update docker
Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon).
-## Fedora 20 installation
+### Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo yum -y remove docker
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
+## Fedora 20
+
+### Installation
For `Fedora 20`, there is a package name conflict with a system tray application
and its executable, so the Docker RPM package was called `docker-io`.
@@ -36,12 +54,26 @@ package first.
$ sudo yum -y remove docker
$ sudo yum -y install docker-io
-To update the `docker` package:
+To update the Docker package:
$ sudo yum -y update docker-io
Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon).
+### Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo yum -y remove docker-io
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
## Starting the Docker daemon
Now that it's installed, let's start the Docker daemon.
diff --git a/docs/sources/installation/frugalware.md b/docs/sources/installation/frugalware.md
index 6b4db23b26..c700280346 100644
--- a/docs/sources/installation/frugalware.md
+++ b/docs/sources/installation/frugalware.md
@@ -28,7 +28,7 @@ in the packages. The core dependencies are:
A simple
- pacman -S lxc-docker
+ $ sudo pacman -S lxc-docker
is all that is needed.
@@ -48,3 +48,21 @@ To start on system boot:
If you need to add an HTTP Proxy, set a different directory or partition for the
Docker runtime files, or make other customizations, read our systemd article to
learn how to [customize your systemd Docker daemon options](/articles/systemd/).
+
+## Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo pacman -R lxc-docker
+
+To uninstall the Docker package and dependencies that are no longer needed:
+
+ $ sudo pacman -Rns lxc-docker
+
+The above commands will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
diff --git a/docs/sources/installation/gentoolinux.md b/docs/sources/installation/gentoolinux.md
index 716eab9d82..865e8eb008 100644
--- a/docs/sources/installation/gentoolinux.md
+++ b/docs/sources/installation/gentoolinux.md
@@ -95,3 +95,21 @@ To start on system boot:
If you need to add an HTTP Proxy, set a different directory or partition for the
Docker runtime files, or make other customizations, read our systemd article to
learn how to [customize your systemd Docker daemon options](/articles/systemd/).
+
+## Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo emerge -cav app-emulation/docker
+
+To uninstall the Docker package and dependencies that are no longer needed:
+
+ $ sudo emerge -C app-emulation/docker
+
+The above commands will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md
index 4b157c1682..89f75d2dd5 100644
--- a/docs/sources/installation/mac.md
+++ b/docs/sources/installation/mac.md
@@ -2,20 +2,19 @@ page_title: Installation on Mac OS X
page_description: Instructions for installing Docker on OS X using boot2docker.
page_keywords: Docker, Docker documentation, requirements, boot2docker, VirtualBox, SSH, Linux, OSX, OS X, Mac
-# Install Docker on Mac OS X
+# Mac OS X
You can install Docker using Boot2Docker to run `docker` commands at your command-line.
Choose this installation if you are familiar with the command-line or plan to
contribute to the Docker project on GitHub.
+[
](/kitematic/)
+
Alternatively, you may want to try Kitematic, an application that lets you set up Docker and
run containers using a graphical user interface (GUI).
-
-
-
## Command-line Docker with Boot2Docker
Because the Docker daemon uses Linux-specific kernel features, you can't run
@@ -55,17 +54,17 @@ When you start the `boot2docker` process, the VM is assigned an IP address. Unde
practice, work through the exercises on this page.
-### Install Boot2Docker
+### Installation
1. Go to the [boot2docker/osx-installer ](
-https://github.com/boot2docker/osx-installer/releases/latest) release page.
+ https://github.com/boot2docker/osx-installer/releases/latest) release page.
4. Download Boot2Docker by clicking `Boot2Docker-x.x.x.pkg` in the "Downloads"
-section.
+ section.
3. Install Boot2Docker by double-clicking the package.
- The installer places Boot2Docker in your "Applications" folder.
+ The installer places Boot2Docker in your "Applications" folder.
The installation places the `docker` and `boot2docker` binaries in your
`/usr/local/bin` directory.
@@ -96,30 +95,32 @@ application:
Once the launch completes, you can run `docker` commands. A good way to verify
your setup succeeded is to run the `hello-world` container.
- $ docker run hello-world
- Unable to find image 'hello-world:latest' locally
- 511136ea3c5a: Pull complete
- 31cbccb51277: Pull complete
- e45a5af57b00: Pull complete
- hello-world:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security.
- Status: Downloaded newer image for hello-world:latest
- Hello from Docker.
- This message shows that your installation appears to be working correctly.
+ $ docker run hello-world
+ Unable to find image 'hello-world:latest' locally
+ 511136ea3c5a: Pull complete
+ 31cbccb51277: Pull complete
+ e45a5af57b00: Pull complete
+ hello-world:latest: The image you are pulling has been verified.
+ Important: image verification is a tech preview feature and should not be
+ relied on to provide security.
+ Status: Downloaded newer image for hello-world:latest
+ Hello from Docker.
+ This message shows that your installation appears to be working correctly.
- To generate this message, Docker took the following steps:
- 1. The Docker client contacted the Docker daemon.
- 2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
- (Assuming it was not already locally available.)
- 3. The Docker daemon created a new container from that image which runs the
- executable that produces the output you are currently reading.
- 4. The Docker daemon streamed that output to the Docker client, which sent it
- to your terminal.
+ To generate this message, Docker took the following steps:
+ 1. The Docker client contacted the Docker daemon.
+ 2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
+ (Assuming it was not already locally available.)
+ 3. The Docker daemon created a new container from that image which runs the
+ executable that produces the output you are currently reading.
+ 4. The Docker daemon streamed that output to the Docker client, which sent it
+ to your terminal.
- To try something more ambitious, you can run an Ubuntu container with:
- $ docker run -it ubuntu bash
+ To try something more ambitious, you can run an Ubuntu container with:
+ $ docker run -it ubuntu bash
- For more examples and ideas, visit:
- http://docs.docker.com/userguide/
+ For more examples and ideas, visit:
+ http://docs.docker.com/userguide/
A more typical way to start and stop `boot2docker` is using the command line.
@@ -130,36 +131,36 @@ Initialize and run `boot2docker` from the command line, do the following:
1. Create a new Boot2Docker VM.
- $ boot2docker init
+ $ boot2docker init
- This creates a new virtual machine. You only need to run this command once.
+ This creates a new virtual machine. You only need to run this command once.
2. Start the `boot2docker` VM.
- $ boot2docker start
+ $ boot2docker start
3. Display the environment variables for the Docker client.
- $ boot2docker shellinit
- Writing /Users/mary/.boot2docker/certs/boot2docker-vm/ca.pem
- Writing /Users/mary/.boot2docker/certs/boot2docker-vm/cert.pem
- Writing /Users/mary/.boot2docker/certs/boot2docker-vm/key.pem
- export DOCKER_HOST=tcp://192.168.59.103:2376
- export DOCKER_CERT_PATH=/Users/mary/.boot2docker/certs/boot2docker-vm
- export DOCKER_TLS_VERIFY=1
+ $ boot2docker shellinit
+ Writing /Users/mary/.boot2docker/certs/boot2docker-vm/ca.pem
+ Writing /Users/mary/.boot2docker/certs/boot2docker-vm/cert.pem
+ Writing /Users/mary/.boot2docker/certs/boot2docker-vm/key.pem
+ export DOCKER_HOST=tcp://192.168.59.103:2376
+ export DOCKER_CERT_PATH=/Users/mary/.boot2docker/certs/boot2docker-vm
+ export DOCKER_TLS_VERIFY=1
- The specific paths and address on your machine will be different.
+ The specific paths and address on your machine will be different.
4. To set the environment variables in your shell do the following:
- $ eval "$(boot2docker shellinit)"
+ $ eval "$(boot2docker shellinit)"
- You can also set them manually by using the `export` commands `boot2docker`
- returns.
+ You can also set them manually by using the `export` commands `boot2docker`
+ returns.
5. Run the `hello-world` container to verify your setup.
- $ docker run hello-world
+ $ docker run hello-world
## Basic Boot2Docker exercises
@@ -167,8 +168,8 @@ Initialize and run `boot2docker` from the command line, do the following:
At this point, you should have `boot2docker` running and the `docker` client
environment initialized. To verify this, run the following commands:
- $ boot2docker status
- $ docker version
+ $ boot2docker status
+ $ docker version
Work through this section to try some practical container tasks using `boot2docker` VM.
@@ -176,52 +177,52 @@ Work through this section to try some practical container tasks using `boot2dock
1. Start an NGINX container on the DOCKER_HOST.
- $ docker run -d -P --name web nginx
+ $ docker run -d -P --name web nginx
- Normally, the `docker run` commands starts a container, runs it, and then
- exits. The `-d` flag keeps the container running in the background
- after the `docker run` command completes. The `-P` flag publishes exposed ports from the
- container to your local host; this lets you access them from your Mac.
+	Normally, the `docker run` command starts a container, runs it, and then
+ exits. The `-d` flag keeps the container running in the background
+ after the `docker run` command completes. The `-P` flag publishes exposed ports from the
+ container to your local host; this lets you access them from your Mac.
2. Display your running container with `docker ps` command
- CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
- 5fb65ff765e9 nginx:latest "nginx -g 'daemon of 3 minutes ago Up 3 minutes 0.0.0.0:49156->443/tcp, 0.0.0.0:49157->80/tcp web
+ CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+ 5fb65ff765e9 nginx:latest "nginx -g 'daemon of 3 minutes ago Up 3 minutes 0.0.0.0:49156->443/tcp, 0.0.0.0:49157->80/tcp web
- At this point, you can see `nginx` is running as a daemon.
+ At this point, you can see `nginx` is running as a daemon.
3. View just the container's ports.
- $ docker port web
- 443/tcp -> 0.0.0.0:49156
- 80/tcp -> 0.0.0.0:49157
+ $ docker port web
+ 443/tcp -> 0.0.0.0:49156
+ 80/tcp -> 0.0.0.0:49157
- This tells you that the `web` container's port `80` is mapped to port
- `49157` on your Docker host.
+ This tells you that the `web` container's port `80` is mapped to port
+ `49157` on your Docker host.
4. Enter the `http://localhost:49157` address (`localhost` is `0.0.0.0`) in your browser:
- 
+ 
- This didn't work. The reason it doesn't work is your `DOCKER_HOST` address is
- not the localhost address (0.0.0.0) but is instead the address of the
- `boot2docker` VM.
+ This didn't work. The reason it doesn't work is your `DOCKER_HOST` address is
+ not the localhost address (0.0.0.0) but is instead the address of the
+ `boot2docker` VM.
5. Get the address of the `boot2docker` VM.
- $ boot2docker ip
- 192.168.59.103
+ $ boot2docker ip
+ 192.168.59.103
6. Enter the `http://192.168.59.103:49157` address in your browser:
- 
+ 
- Success!
+ Success!
7. To stop and then remove your running `nginx` container, do the following:
- $ docker stop web
- $ docker rm web
+ $ docker stop web
+ $ docker rm web
### Mount a volume on the container
@@ -231,46 +232,46 @@ The next exercise demonstrates how to do this.
1. Change to your user `$HOME` directory.
- $ cd $HOME
+ $ cd $HOME
2. Make a new `site` directory.
- $ mkdir site
+ $ mkdir site
3. Change into the `site` directory.
- $ cd site
+ $ cd site
4. Create a new `index.html` file.
- $ echo "my new site" > index.html
+ $ echo "my new site" > index.html
5. Start a new `nginx` container and replace the `html` folder with your `site` directory.
- $ docker run -d -P -v $HOME/site:/usr/share/nginx/html --name mysite nginx
+ $ docker run -d -P -v $HOME/site:/usr/share/nginx/html --name mysite nginx
6. Get the `mysite` container's port.
- $ docker port mysite
- 80/tcp -> 0.0.0.0:49166
- 443/tcp -> 0.0.0.0:49165
+ $ docker port mysite
+ 80/tcp -> 0.0.0.0:49166
+ 443/tcp -> 0.0.0.0:49165
7. Open the site in a browser:
- 
+ 
8. Try adding a page to your `$HOME/site` in real time.
- $ echo "This is cool" > cool.html
+ $ echo "This is cool" > cool.html
9. Open the new page in the browser.
- 
+ 
9. Stop and then remove your running `mysite` container.
- $ docker stop mysite
- $ docker rm mysite
+ $ docker stop mysite
+ $ docker rm mysite
## Upgrade Boot2Docker
@@ -286,11 +287,11 @@ To upgrade from 1.4.1 or greater, you can do this:
2. Stop the `boot2docker` application.
- $ boot2docker stop
+ $ boot2docker stop
3. Run the upgrade command.
- $ boot2docker upgrade
+ $ boot2docker upgrade
### Use the installer
@@ -301,22 +302,46 @@ To upgrade any version of Boot2Docker, do this:
2. Stop the `boot2docker` application.
- $ boot2docker stop
+ $ boot2docker stop
3. Go to the [boot2docker/osx-installer ](
https://github.com/boot2docker/osx-installer/releases/latest) release page.
4. Download Boot2Docker by clicking `Boot2Docker-x.x.x.pkg` in the "Downloads"
-section.
+ section.
2. Install Boot2Docker by double-clicking the package.
- The installer places Boot2Docker in your "Applications" folder.
+ The installer places Boot2Docker in your "Applications" folder.
+
+
+## Uninstallation
+
+1. Go to the [boot2docker/osx-installer ](
+ https://github.com/boot2docker/osx-installer/releases/latest) release page.
+
+2. Download the source code by clicking `Source code (zip)` or
+ `Source code (tar.gz)` in the "Downloads" section.
+
+3. Extract the source code.
+
+4. Open a terminal on your local machine.
+
+5. Change to the directory where you extracted the source code:
+
+        $ cd <path to extracted source code>
+
+6. Make sure the uninstall.sh script is executable:
+
+ $ chmod +x uninstall.sh
+
+7. Run the uninstall.sh script:
+
+ $ ./uninstall.sh
## Learning more and acknowledgement
-
Use `boot2docker help` to list the full command line reference. For more
information about using SSH or SCP to access the Boot2Docker VM, see the README
at [Boot2Docker repository](https://github.com/boot2docker/boot2docker).
diff --git a/docs/sources/installation/oracle.md b/docs/sources/installation/oracle.md
index e05e664c12..e74decd9b9 100644
--- a/docs/sources/installation/oracle.md
+++ b/docs/sources/installation/oracle.md
@@ -43,35 +43,35 @@ To enable the *addons* repository:
`/etc/yum.repos.d/public-yum-ol7.repo`
and set `enabled=1` in the `[ol6_addons]` or the `[ol7_addons]` stanza.
-## To install Docker:
+## Installation
1. Ensure the appropriate *addons* channel or repository has been enabled.
2. Use yum to install the Docker package:
- $ sudo yum install docker
+ $ sudo yum install docker
-## To start Docker:
+## Starting Docker
1. Now that it's installed, start the Docker daemon:
- 1. On Oracle Linux 6:
+ 1. On Oracle Linux 6:
- $ sudo service docker start
+ $ sudo service docker start
- 2. On Oracle Linux 7:
+ 2. On Oracle Linux 7:
- $ sudo systemctl start docker.service
+ $ sudo systemctl start docker.service
2. If you want the Docker daemon to start automatically at boot:
- 1. On Oracle Linux 6:
+ 1. On Oracle Linux 6:
- $ sudo chkconfig docker on
+ $ sudo chkconfig docker on
- 2. On Oracle Linux 7:
+ 2. On Oracle Linux 7:
- $ sudo systemctl enable docker.service
+ $ sudo systemctl enable docker.service
**Done!**
@@ -99,6 +99,20 @@ To enable btrfs support on Oracle Linux:
You can now continue with the [Docker User Guide](/userguide/).
+## Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo yum -y remove docker
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
## Known issues
### Docker unmounts btrfs filesystem on shutdown
diff --git a/docs/sources/installation/rhel.md b/docs/sources/installation/rhel.md
index b3bd7aa1d0..9b17346921 100644
--- a/docs/sources/installation/rhel.md
+++ b/docs/sources/installation/rhel.md
@@ -16,7 +16,9 @@ running on kernels shipped by the distribution. There are kernel changes which
will cause issues if one decides to step outside that box and run
non-distribution kernel packages.
-## Red Hat Enterprise Linux 7 installation
+## Red Hat Enterprise Linux 7
+
+### Installation
**Red Hat Enterprise Linux 7 (64 bit)** has [shipped with
Docker](https://access.redhat.com/site/products/red-hat-enterprise-linux/docker-and-containers).
@@ -41,7 +43,21 @@ Portal](https://access.redhat.com/).
Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon).
-## Red Hat Enterprise Linux 6.6 installation
+### Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo yum -y remove docker
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
+## Red Hat Enterprise Linux 6.6
You will need **64 bit** [RHEL
6.6](https://access.redhat.com/site/articles/3078#RHEL6) or later, with
@@ -66,7 +82,7 @@ non-distro kernel packages.
> vulnerabilities and severe bugs (such as those found in kernel 2.6.32)
> are fixed.
-## Installation
+### Installation
Firstly, you need to install the EPEL repository. Please follow the
[EPEL installation
@@ -90,6 +106,20 @@ To update the `docker-io` package
Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon).
+### Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo yum -y remove docker-io
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
## Starting the Docker daemon
Now that it's installed, let's start the Docker daemon.
@@ -118,7 +148,6 @@ If you need to add an HTTP Proxy, set a different directory or partition for the
Docker runtime files, or make other customizations, read our Systemd article to
learn how to [customize your Systemd Docker daemon options](/articles/systemd/).
-
## Issues?
If you have any issues - please report them directly in the
diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md
index 75b3c9fb68..652edc9fd8 100644
--- a/docs/sources/installation/ubuntulinux.md
+++ b/docs/sources/installation/ubuntulinux.md
@@ -28,8 +28,8 @@ and frequently panic under certain conditions.
To check your current kernel version, open a terminal and use `uname -r` to display
your kernel version:
- $ uname -r
- 3.11.0-15-generic
+ $ uname -r
+ 3.11.0-15-generic
>**Caution** Some Ubuntu OS versions **require a version higher than 3.10** to
>run Docker, see the prerequisites on this page that apply to your Ubuntu
@@ -72,17 +72,17 @@ To upgrade your kernel and install the additional packages, do the following:
2. Update your package manager.
- $ sudo apt-get update
+ $ sudo apt-get update
3. Install both the required and optional packages.
- $ sudo apt-get install linux-image-generic-lts-trusty
+ $ sudo apt-get install linux-image-generic-lts-trusty
- Depending on your environment, you may install more as described in the preceding table.
+ Depending on your environment, you may install more as described in the preceding table.
4. Reboot your host.
- $ sudo reboot
+ $ sudo reboot
5. After your system reboots, go ahead and [install Docker](#installing-docker-on-ubuntu).
@@ -92,7 +92,7 @@ To upgrade your kernel and install the additional packages, do the following:
Docker uses AUFS as the default storage backend. If you don't have this
prerequisite installed, Docker's installation process adds it.
-##Installing Docker on Ubuntu
+##Installation
Make sure you have installed the prerequisites for your Ubuntu version. Then,
install Docker using the following:
@@ -101,19 +101,19 @@ install Docker using the following:
2. Verify that you have `wget` installed.
- $ which wget
+ $ which wget
- If `wget` isn't installed, install it after updating your manager:
+ If `wget` isn't installed, install it after updating your manager:
- $ sudo apt-get update
- $ sudo apt-get install wget
+ $ sudo apt-get update
+ $ sudo apt-get install wget
3. Get the latest Docker package.
- $ wget -qO- https://get.docker.com/ | sh
+ $ wget -qO- https://get.docker.com/ | sh
- The system prompts you for your `sudo` password. Then, it downloads and
- installs Docker and its dependencies.
+ The system prompts you for your `sudo` password. Then, it downloads and
+ installs Docker and its dependencies.
>**Note**: If your company is behind a filtering proxy, you may find that the
>`apt-key`
>command fails for the Docker repo during installation. To work around this,
@@ -123,9 +123,9 @@ install Docker using the following:
4. Verify `docker` is installed correctly.
- $ sudo docker run hello-world
+ $ sudo docker run hello-world
- This command downloads a test image and runs it in a container.
+ This command downloads a test image and runs it in a container.
## Optional configurations for Docker on Ubuntu
@@ -155,19 +155,19 @@ To create the `docker` group and add your user:
1. Log into Ubuntu as a user with `sudo` privileges.
- This procedure assumes you log in as the `ubuntu` user.
+ This procedure assumes you log in as the `ubuntu` user.
3. Create the `docker` group and add your user.
- $ sudo usermod -aG docker ubuntu
+ $ sudo usermod -aG docker ubuntu
3. Log out and log back in.
- This ensures your user is running with the correct permissions.
+ This ensures your user is running with the correct permissions.
4. Verify your work by running `docker` without `sudo`.
- $ docker run hello-world
+ $ docker run hello-world
### Adjust memory and swap accounting
@@ -187,13 +187,13 @@ following.
3. Set the `GRUB_CMDLINE_LINUX` value as follows:
- GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
+ GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
4. Save and close the file.
5. Update GRUB.
- $ sudo update-grub
+ $ sudo update-grub
6. Reboot your system.
@@ -216,25 +216,25 @@ To configure UFW and allow incoming connections on the Docker port:
2. Verify that UFW is installed and enabled.
- $ sudo ufw status
+ $ sudo ufw status
3. Open the `/etc/default/ufw` file for editing.
- $ sudo nano /etc/default/ufw
+ $ sudo nano /etc/default/ufw
4. Set the `DEFAULT_FORWARD_POLICY` policy to:
- DEFAULT_FORWARD_POLICY="ACCEPT"
+ DEFAULT_FORWARD_POLICY="ACCEPT"
5. Save and close the file.
6. Reload UFW to use the new setting.
- $ sudo ufw reload
+ $ sudo ufw reload
7. Allow incoming connections on the Docker port.
- $ sudo ufw allow 2375/tcp
+ $ sudo ufw allow 2375/tcp
### Configure a DNS server for use by Docker
@@ -262,25 +262,25 @@ To specify a DNS server for use by Docker:
2. Open the `/etc/default/docker` file for editing.
- $ sudo nano /etc/default/docker
+ $ sudo nano /etc/default/docker
3. Add a setting for Docker.
- DOCKER_OPTS="--dns 8.8.8.8"
+ DOCKER_OPTS="--dns 8.8.8.8"
Replace `8.8.8.8` with a local DNS server such as `192.168.1.1`. You can also
specify multiple DNS servers. Separated them with spaces, for example:
- --dns 8.8.8.8 --dns 192.168.1.1
+ --dns 8.8.8.8 --dns 192.168.1.1
- >**Warning**: If you're doing this on a laptop which connects to various
- >networks, make sure to choose a public DNS server.
+ >**Warning**: If you're doing this on a laptop which connects to various
+ >networks, make sure to choose a public DNS server.
4. Save and close the file.
5. Restart the Docker daemon.
- $ sudo restart docker
+ $ sudo restart docker
@@ -291,22 +291,39 @@ NetworkManager (this might slow your network).
1. Open the `/etc/NetworkManager/NetworkManager.conf` file for editing.
- $ sudo nano /etc/NetworkManager/NetworkManager.conf
+ $ sudo nano /etc/NetworkManager/NetworkManager.conf
2. Comment out the `dns=dsnmasq` line:
- dns=dnsmasq
+ dns=dnsmasq
3. Save and close the file.
4. Restart both the NetworkManager and Docker.
- $ sudo restart network-manager $ sudo restart docker
+ $ sudo restart network-manager $ sudo restart docker
## Upgrade Docker
-To install the latest version of Docker, use the standard `-N` flag with `wget`:
+To install the latest version of Docker with `wget`:
- $ wget -N -qO- https://get.docker.com/ | sh
+ $ wget -qO- https://get.docker.com/ | sh
+## Uninstallation
+
+To uninstall the Docker package:
+
+ $ sudo apt-get purge lxc-docker
+
+To uninstall the Docker package and dependencies that are no longer needed:
+
+ $ sudo apt-get autoremove --purge lxc-docker
+
+The above commands will not remove images, containers, volumes, or user-created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+ $ rm -rf /var/lib/docker
+
+You must delete the user-created configuration files manually.
diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md
index fd3cc7eb4a..b5a148417a 100644
--- a/docs/sources/installation/windows.md
+++ b/docs/sources/installation/windows.md
@@ -67,7 +67,7 @@ Boot2Docker command requires `ssh.exe` to be in the PATH, therefore we need to
include `bin` folder of the Git installation (which has ssh.exe) to the `%PATH%`
environment variable by running:
- set PATH=%PATH%;"c:\Program Files (x86)\Git\bin"
+ set PATH=%PATH%;"c:\Program Files (x86)\Git\bin"
and then we can run the `boot2docker start` command to start the Boot2Docker VM.
(Run `boot2docker init` command if you get an error saying machine does not
@@ -81,7 +81,7 @@ to your console window and you are ready to run docker commands such as
Launch a PowerShell window, then you need to add `ssh.exe` to your PATH:
- $Env:Path = "${Env:Path};c:\Program Files (x86)\Git\bin"
+ $Env:Path = "${Env:Path};c:\Program Files (x86)\Git\bin"
and after running `boot2docker start` command it will print PowerShell commands
to set the environment variables to connect Docker running inside VM. Run these
@@ -150,6 +150,12 @@ You can do this with
- then click: "Save Private Key".
- Then use the saved file to login with PuTTY using `docker@127.0.0.1:2022`.
+## Uninstallation
+
+You can uninstall Boot2Docker using the Windows standard process for removing programs.
+This process does not remove the `docker-install.exe` file. You must delete that file
+yourself.
+
## References
If you have Docker hosts running and if you don't wish to do a
diff --git a/docs/sources/project/software-req-win.md b/docs/sources/project/software-req-win.md
index a7f1378929..38cd73d10f 100644
--- a/docs/sources/project/software-req-win.md
+++ b/docs/sources/project/software-req-win.md
@@ -15,7 +15,7 @@ you must have:
- MinGW (tar and xz)
- Go language
-> **Note**: This installation prcedure refers to the `C:\` drive. If you system's main drive
+> **Note**: This installation procedure refers to the `C:\` drive. If your system's main drive
is `D:\` you'll need to substitute that in where appropriate in these
instructions.
@@ -56,7 +56,7 @@ Docker Go code as you develop.
1. Browse to
[tdm-gcc download page](http://tdm-gcc.tdragon.net/download).
-2. Click on the lastest 64-bit version of the package.
+2. Click on the latest 64-bit version of the package.
Windows prompts you to save the file to your machine
@@ -166,7 +166,7 @@ In this section, you install the Go language. Then, you build the source so that
3. Run the installer.
- The system opens the **Go Programming Langauge Setup** dialog.
+ The system opens the **Go Programming Language Setup** dialog.
4. Select all the defaults to install.
diff --git a/docs/sources/project/test-and-docs.md b/docs/sources/project/test-and-docs.md
index 23b6b0914d..bcf4167ff2 100644
--- a/docs/sources/project/test-and-docs.md
+++ b/docs/sources/project/test-and-docs.md
@@ -67,10 +67,6 @@ is simply `test`. The make file contains several targets for testing:
test-unit |
Run just the unit tests. |
-
- test-integration |
- Run just integration tests. |
-
test-integration-cli |
Run the test for the integration command line interface. |
@@ -143,7 +139,7 @@ Try this now.
3. Run the tests using the `hack/make.sh` script.
- root@5f8630b873fe:/go/src/github.com/docker/docker# hack/make.sh dynbinary binary cross test-unit test-integration test-integration-cli test-docker-py
+ root@5f8630b873fe:/go/src/github.com/docker/docker# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py
The tests run just as they did within your local host.
@@ -164,11 +160,11 @@ You can use the `TESTFLAGS` environment variable to run a single test. The
flag's value is passed as arguments to the `go test` command. For example, from
your local host you can run the `TestBuild` test with this command:
- $ TESTFLAGS='-check.f DockerSuite.TestBuild*' make test
+ $ TESTFLAGS='-check.f DockerSuite.TestBuild*' make test-integration-cli
To run the same test inside your Docker development container, you do this:
- root@5f8630b873fe:/go/src/github.com/docker/docker# TESTFLAGS='-check.f TestBuild*' hack/make.sh
+ root@5f8630b873fe:/go/src/github.com/docker/docker# TESTFLAGS='-check.f TestBuild*' hack/make.sh binary test-integration-cli
## If tests under Boot2Docker fail due to disk space errors
diff --git a/docs/sources/reference.md b/docs/sources/reference.md
index 6c1ab462d4..8cfe304672 100644
--- a/docs/sources/reference.md
+++ b/docs/sources/reference.md
@@ -3,6 +3,7 @@
## Contents:
- [Commands](commandline/)
+ - [Logging drivers](logging/)
- [Dockerfile Reference](builder/)
- [Docker Run Reference](run/)
- [APIs](api/)
diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md
index d92084f29b..75ce93ceb5 100644
--- a/docs/sources/reference/api/docker_remote_api.md
+++ b/docs/sources/reference/api/docker_remote_api.md
@@ -38,6 +38,22 @@ Calling `/info` is the same as calling
You can still call an old version of the API using
`/v1.18/info`.
+## Docker Events
+
+The following diagram depicts the container states accessible through the API.
+
+
+
+Some container-related events are not affected by container state, so they are not included in this diagram. These events are:
+
+* **export** emitted by `docker export`
+* **exec_create** emitted by `docker exec`
+* **exec_start** emitted by `docker exec` after **exec_create**
+
+Running `docker rmi` emits an **untag** event when removing an image name. The `rmi` command may also emit **delete** events when images are deleted by ID directly or by deleting the last tag referring to the image.
+
+> **Acknowledgement**: This diagram and the accompanying text were used with the permission of Matt Good and Glider Labs. See Matt's original blog post [Docker Events Explained](http://gliderlabs.com/blog/2015/04/14/docker-events-explained/).
+
## v1.19
### Full documentation
@@ -46,6 +62,32 @@ You can still call an old version of the API using
### What's new
+**New!**
+When the daemon detects a version mismatch with the client, usually when
+the client is newer than the daemon, an HTTP 400 is now returned instead
+of a 404.
+
+`GET /containers/(id)/stats`
+
+**New!**
+You can now supply a `stream` bool to get only one set of stats and
+disconnect.
+
+`GET /containers/(id)/logs`
+
+**New!**
+
+This endpoint now accepts a `since` timestamp parameter.
+
+`GET /info`
+
+**New!**
+
+The fields `Debug`, `IPv4Forwarding`, `MemoryLimit`, and `SwapLimit`
+are now returned as boolean instead of as an int.
+
+In addition, the end point now returns the new boolean fields
+`CpuCfsPeriod`, `CpuCfsQuota`, and `OomKillDisable`.
## v1.18
diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md
index 8fcf8cb187..e4fe5074d9 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.15.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.15.md
@@ -207,8 +207,8 @@ Json Parameters:
volume for the container), `host_path:container_path` (to bind-mount
a host path into the container), or `host_path:container_path:ro`
(to make the bind-mount read-only inside the container).
- - **Links** - A list of links for the container. Each link entry should be of
- of the form "container_name:alias".
+ - **Links** - A list of links for the container. Each link entry should be
+ in the form of "container_name:alias".
- **LxcConf** - LXC specific configurations. These configurations will only
work when using the `lxc` execution driver.
- **PortBindings** - A map of exposed container ports and the host port they
diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md
index 9c6159b9d5..df8e5be13e 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.16.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.16.md
@@ -207,8 +207,8 @@ Json Parameters:
volume for the container), `host_path:container_path` (to bind-mount
a host path into the container), or `host_path:container_path:ro`
(to make the bind-mount read-only inside the container).
- - **Links** - A list of links for the container. Each link entry should be of
- of the form "container_name:alias".
+ - **Links** - A list of links for the container. Each link entry should be
+ in the form of "container_name:alias".
- **LxcConf** - LXC specific configurations. These configurations will only
work when using the `lxc` execution driver.
- **PortBindings** - A map of exposed container ports and the host port they
diff --git a/docs/sources/reference/api/docker_remote_api_v1.17.md b/docs/sources/reference/api/docker_remote_api_v1.17.md
index 80f4fccf00..d8ef81c0fb 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.17.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.17.md
@@ -207,8 +207,8 @@ Json Parameters:
volume for the container), `host_path:container_path` (to bind-mount
a host path into the container), or `host_path:container_path:ro`
(to make the bind-mount read-only inside the container).
- - **Links** - A list of links for the container. Each link entry should be of
- of the form "container_name:alias".
+ - **Links** - A list of links for the container. Each link entry should be
+ in the form of "container_name:alias".
- **LxcConf** - LXC specific configurations. These configurations will only
work when using the `lxc` execution driver.
- **PortBindings** - A map of exposed container ports and the host port they
diff --git a/docs/sources/reference/api/docker_remote_api_v1.18.md b/docs/sources/reference/api/docker_remote_api_v1.18.md
index a91ca8417a..2f6f2aadaf 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.18.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.18.md
@@ -218,8 +218,8 @@ Json Parameters:
volume for the container), `host_path:container_path` (to bind-mount
a host path into the container), or `host_path:container_path:ro`
(to make the bind-mount read-only inside the container).
- - **Links** - A list of links for the container. Each link entry should be of
- of the form `container_name:alias`.
+ - **Links** - A list of links for the container. Each link entry should be
+ in the form of `container_name:alias`.
- **LxcConf** - LXC specific configurations. These configurations will only
work when using the `lxc` execution driver.
- **PortBindings** - A map of exposed container ports and the host port they
@@ -1594,35 +1594,50 @@ Display system-wide information
Content-Type: application/json
{
- "Containers":11,
- "Images":16,
- "Driver":"btrfs",
- "DriverStatus": [[""]],
- "ExecutionDriver":"native-0.1",
- "KernelVersion":"3.12.0-1-amd64"
- "NCPU":1,
- "MemTotal":2099236864,
- "Name":"prod-server-42",
- "ID":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
- "Debug":false,
- "NFd": 11,
- "NGoroutines":21,
- "SystemTime": "2015-03-10T11:11:23.730591467-07:00"
- "NEventsListener":0,
- "InitPath":"/usr/bin/docker",
- "InitSha1":"",
- "IndexServerAddress":["https://index.docker.io/v1/"],
- "MemoryLimit":true,
- "SwapLimit":false,
- "IPv4Forwarding":true,
- "Labels":["storage=ssd"],
- "DockerRootDir": "/var/lib/docker",
- "HttpProxy": "http://test:test@localhost:8080"
- "HttpsProxy": "https://test:test@localhost:8080"
- "NoProxy": "9.81.1.160"
- "OperatingSystem": "Boot2Docker",
+ "Containers": 11,
+ "Debug": 0,
+ "DockerRootDir": "/var/lib/docker",
+ "Driver": "btrfs",
+ "DriverStatus": [[""]],
+ "ExecutionDriver": "native-0.1",
+ "HttpProxy": "http://test:test@localhost:8080",
+ "HttpsProxy": "https://test:test@localhost:8080",
+ "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
+ "IPv4Forwarding": 1,
+ "Images": 16,
+ "IndexServerAddress": "https://index.docker.io/v1/",
+ "InitPath": "/usr/bin/docker",
+ "InitSha1": "",
+ "KernelVersion": "3.12.0-1-amd64",
+ "Labels": [
+ "storage=ssd"
+ ],
+ "MemTotal": 2099236864,
+ "MemoryLimit": 1,
+ "NCPU": 1,
+ "NEventsListener": 0,
+ "NFd": 11,
+ "NGoroutines": 21,
+ "Name": "prod-server-42",
+ "NoProxy": "9.81.1.160",
+ "OperatingSystem": "Boot2Docker",
+ "RegistryConfig": {
+ "IndexConfigs": {
+ "docker.io": {
+ "Mirrors": null,
+ "Name": "docker.io",
+ "Official": true,
+ "Secure": true
+ }
+ },
+ "InsecureRegistryCIDRs": [
+ "127.0.0.0/8"
+ ]
+ },
+ "SwapLimit": 0,
+ "SystemTime": "2015-03-10T11:11:23.730591467-07:00"
}
-
+
Status Codes:
- **200** – no error
diff --git a/docs/sources/reference/api/docker_remote_api_v1.19.md b/docs/sources/reference/api/docker_remote_api_v1.19.md
index cede2e1073..dde8ee79a1 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.19.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.19.md
@@ -13,6 +13,8 @@ page_keywords: API, Docker, rcli, REST, documentation
- The API tends to be REST, but for some complex commands, like `attach`
or `pull`, the HTTP connection is hijacked to transport `STDOUT`,
`STDIN` and `STDERR`.
+ - When the client API version is newer than the daemon's an HTTP
+ `400 Bad Request` error message is returned.
# 2. Endpoints
@@ -147,8 +149,11 @@ Create a container
"Memory": 0,
"MemorySwap": 0,
"CpuShares": 512,
+ "CpuPeriod": 100000,
"CpusetCpus": "0,1",
"CpusetMems": "0,1",
+ "BlkioWeight": 300,
+ "OomKillDisable": false,
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
"PublishAllPorts": false,
"Privileged": false,
@@ -191,9 +196,12 @@ Json Parameters:
always use this with `memory`, and make the value larger than `memory`.
- **CpuShares** - An integer value containing the CPU Shares for container
(ie. the relative weight vs other containers).
+- **CpuPeriod** - The length of a CPU period (in microseconds).
- **Cpuset** - The same as CpusetCpus, but deprecated, please don't use.
- **CpusetCpus** - String value containing the cgroups CpusetCpus to use.
- **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+- **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+- **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
- **AttachStdin** - Boolean value, attaches to stdin.
- **AttachStdout** - Boolean value, attaches to stdout.
- **AttachStderr** - Boolean value, attaches to stderr.
@@ -220,8 +228,8 @@ Json Parameters:
volume for the container), `host_path:container_path` (to bind-mount
a host path into the container), or `host_path:container_path:ro`
(to make the bind-mount read-only inside the container).
- - **Links** - A list of links for the container. Each link entry should be of
- of the form `container_name:alias`.
+ - **Links** - A list of links for the container. Each link entry should be
+ in the form of `container_name:alias`.
- **LxcConf** - LXC specific configurations. These configurations will only
work when using the `lxc` execution driver.
- **PortBindings** - A map of exposed container ports and the host port they
@@ -339,12 +347,14 @@ Return low-level information on the container `id`
"ExecIDs": null,
"HostConfig": {
"Binds": null,
+ "BlkioWeight": 0,
"CapAdd": null,
"CapDrop": null,
"ContainerIDFile": "",
"CpusetCpus": "",
"CpusetMems": "",
"CpuShares": 0,
+ "CpuPeriod": 100000,
"Devices": [],
"Dns": null,
"DnsSearch": null,
@@ -354,6 +364,7 @@ Return low-level information on the container `id`
"LxcConf": [],
"Memory": 0,
"MemorySwap": 0,
+ "OomKillDisable": false,
"NetworkMode": "bridge",
"PortBindings": {},
"Privileged": false,
@@ -468,7 +479,7 @@ Get stdout and stderr logs from the container ``id``
**Example request**:
- GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10 HTTP/1.1
+ GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1
**Example response**:
@@ -484,6 +495,8 @@ Query Parameters:
- **follow** – 1/True/true or 0/False/false, return stream. Default false
- **stdout** – 1/True/true or 0/False/false, show stdout log. Default false
- **stderr** – 1/True/true or 0/False/false, show stderr log. Default false
+- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp
+ will only output log-entries since that timestamp. Default: 0 (unfiltered)
- **timestamps** – 1/True/true or 0/False/false, print timestamps for
every log line. Default false
- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all
@@ -644,6 +657,10 @@ This endpoint returns a live stream of a container's resource usage statistics.
}
}
+Query Parameters:
+
+- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default true
+
Status Codes:
- **200** – no error
@@ -1597,33 +1614,52 @@ Display system-wide information
Content-Type: application/json
{
- "Containers":11,
- "Images":16,
- "Driver":"btrfs",
- "DriverStatus": [[""]],
- "ExecutionDriver":"native-0.1",
- "KernelVersion":"3.12.0-1-amd64"
- "NCPU":1,
- "MemTotal":2099236864,
- "Name":"prod-server-42",
- "ID":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
- "Debug":false,
- "NFd": 11,
- "NGoroutines":21,
- "SystemTime": "2015-03-10T11:11:23.730591467-07:00"
- "NEventsListener":0,
- "InitPath":"/usr/bin/docker",
- "InitSha1":"",
- "IndexServerAddress":["https://index.docker.io/v1/"],
- "MemoryLimit":true,
- "SwapLimit":false,
- "IPv4Forwarding":true,
- "Labels":["storage=ssd"],
- "DockerRootDir": "/var/lib/docker",
- "HttpProxy": "http://test:test@localhost:8080"
- "HttpsProxy": "https://test:test@localhost:8080"
- "NoProxy": "9.81.1.160"
- "OperatingSystem": "Boot2Docker",
+ "Containers": 11,
+ "CpuCfsPeriod": true,
+ "CpuCfsQuota": true,
+ "Debug": false,
+ "DockerRootDir": "/var/lib/docker",
+ "Driver": "btrfs",
+ "DriverStatus": [[""]],
+ "ExecutionDriver": "native-0.1",
+ "ExperimentalBuild": false,
+ "HttpProxy": "http://test:test@localhost:8080",
+ "HttpsProxy": "https://test:test@localhost:8080",
+ "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
+ "IPv4Forwarding": true,
+ "Images": 16,
+ "IndexServerAddress": "https://index.docker.io/v1/",
+ "InitPath": "/usr/bin/docker",
+ "InitSha1": "",
+ "KernelVersion": "3.12.0-1-amd64",
+ "Labels": [
+ "storage=ssd"
+ ],
+ "MemTotal": 2099236864,
+ "MemoryLimit": true,
+ "NCPU": 1,
+ "NEventsListener": 0,
+ "NFd": 11,
+ "NGoroutines": 21,
+ "Name": "prod-server-42",
+ "NoProxy": "9.81.1.160",
+ "OomKillDisable": true,
+ "OperatingSystem": "Boot2Docker",
+ "RegistryConfig": {
+ "IndexConfigs": {
+ "docker.io": {
+ "Mirrors": null,
+ "Name": "docker.io",
+ "Official": true,
+ "Secure": true
+ }
+ },
+ "InsecureRegistryCIDRs": [
+ "127.0.0.0/8"
+ ]
+ },
+ "SwapLimit": false,
+ "SystemTime": "2015-03-10T11:11:23.730591467-07:00"
}
Status Codes:
diff --git a/docs/sources/reference/api/images/event_state.gliffy b/docs/sources/reference/api/images/event_state.gliffy
new file mode 100644
index 0000000000..2eb0f3a5d9
--- /dev/null
+++ b/docs/sources/reference/api/images/event_state.gliffy
@@ -0,0 +1 @@
+{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":1109,"height":539,"nodeIndex":335,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":26.46762966848334,"y":100},"max":{"x":1109,"y":538.0017856687341}},"objects":[{"x":83.0,"y":251.0,"rotation":0.0,"id":328,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":328,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":188,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-52.03237033151666,-0.9999999999999716],[87.0,-1.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":332,"width":67.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.5233416311379174,"linePerpValue":null,"cardinalityType":null,"html":"docker run
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":74.0,"y":318.0,"rotation":0.0,"id":327,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":327,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":228,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-42.0,1.0],[96.0,1.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":333,"width":83.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.5689443767164591,"linePerpValue":null,"cardinalityType":null,"html":"docker create
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":191.0,"y":409.0,"rotation":0.0,"id":325,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":325,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":193,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":215,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-21.0,1.0],[-61.0,1.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":331.0,"y":346.0,"rotation":0.0,"id":320,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":320,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":193,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-21.0,1.0],[-53.5,1.0],[-53.5,64.0],[-86.0,64.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":324,"width":63.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"docker rm
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":265.0,"y":245.0,"rotation":0.0,"id":319,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":319,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":188,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":191,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-20.0,5.0],[312.5,5.0],[312.5,55.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":872.0,"y":503.0,"rotation":0.0,"id":310,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":310,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":205,"py":0.0,"px":0.2928932188134524}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-60.03300858899104,-53.0],[-148.0,-151.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":735.0,"y":341.0,"rotation":0.0,"id":307,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":307,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":203,"py":0.2928932188134525,"px":1.1102230246251563E-16}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none",
"dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,0.0],[137.5,60.7157287525381]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":309,"width":83.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.37922003257116654,"linePerpValue":null,"cardinalityType":null,"html":"docker pause
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":1023.0,"y":446.0,"rotation":0.0,"id":298,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":298,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":213,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":205,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[39.5,-1.0],[39.5,24.0],[-158.0,24.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":313,"width":101.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.37286693198126664,"linePerpValue":null,"cardinalityType":null,"html":" docker unpause
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":904.0,"y":434.0,"rotation":0.0,"id":295,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":295,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":203,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":213,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[43.5,-24.0],[123.5,-24.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":411.0,"y":419.0,"rotation":0.0,"id":291,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":291,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":217,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":209,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[3.2659812842322253,1.0],[3.2659812842322253,-32.0],[-81.46803743153555,-32.0],[-81.46803743153555,-65.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":292,"width":21.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddin
gBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.4870188236535277,"linePerpValue":null,"cardinalityType":null,"html":"No
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":415.0,"y":419.0,"rotation":0.0,"id":289,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":289,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":217,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":191,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-0.7340187157677747,1.0],[-0.7340187157677747,-32.5],[162.5,-32.5],[162.5,-79.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":290,"width":26.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.46753493572435184,"linePerpValue":null,"cardinalityType":null,"html":"Yes
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":521.0,"y":209.0,"rotation":0.0,"id":287,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":287,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":195,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-11.0,-19.0],[-87.0,-19.0],[-87.0,84.0],[-163.0,84.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":988.0,"y":232.0,"rotation":0.0,"id":282,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":282,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":201,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[39.5,18.0],[-150.0,18.0],[-150.0,68.0],[-250.0,68.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[]},{"x":664.0,"y":493.0,"rotation":0.0,"id":276,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":276,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":207,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":236,"py":0.7071067811865475,"px":0.9999999999999998}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor
":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[6.0,-33.0],[-36.5,-33.0],[-36.5,-44.7157287525381],[-79.0,-44.7157287525381]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":678.0,"y":344.0,"rotation":0.0,"id":273,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":273,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":236,"py":0.29289321881345237,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.0,-4.0],[-45.5,-4.0],[-45.5,87.7157287525381],[-93.0,87.7157287525381]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":275,"width":57.0,"height":40.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.5,"linePerpValue":0.0,"cardinalityType":null,"html":"container
process
exited
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":566.0,"y":431.0,"rotation":0.0,"id":272,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":272,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":236,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":217,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-56.0,9.0],[-78.86700935788389,9.0],[-78.86700935788389,39.0],[-101.73401871576777,39.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":785.0,"y":119.0,"rotation":0.0,"id":270,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":270,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":199,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":209,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[5.0,1.0],[-455.46803743153555,1.0],[-455.46803743153555,165.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":829.0,"y":172.0,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":269,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeI
d":248,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":199,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.5,-2.0],[-1.5,-32.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":661.0,"y":189.0,"rotation":0.0,"id":267,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":267,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":195,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[7.0,2.284271247461902],[-76.0,1.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":946.0,"y":319.0,"rotation":0.0,"id":263,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":263,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":197,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":233,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.5,1.0],[81.5,1.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":708.0,"y":286.0,"rotation":0.0,"id":256,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.d
efault.line","order":256,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":211,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":254,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.5,-2.0],[-0.5,-76.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":258,"width":64.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.3108108108108108,"linePerpValue":null,"cardinalityType":null,"html":"docker kill
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":710.0,"y":359.0,"rotation":0.0,"id":245,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":46,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":211,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":207,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-2.5,-5.0],[-2.5,81.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":247,"width":83.0,"height":27.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":" killed by
out-of-memory
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":761.0,"y":318.0,"rotation":0.0,"id":238,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":42,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":211,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":197,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-18.5,1.0],[111.5,2.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":240,"width":85.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.4363456059259962,"linePerpValue":null,"cardinalityType":null,"html":"docker restart
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":608.0,"y":319.0,"rotation":0.0,"id":232,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":37,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":191,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":211,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[7.0,1.0],[64.5,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":454.0,"y":325.0,"rotation":0.0,"id":231,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":36,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":209,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":191,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-89.46803743153555,-6.0],[86.0,-5.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":288,"width":77.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.3163731357954
714,"linePerpValue":null,"cardinalityType":null,"html":"docker start
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":299.0,"y":321.0,"rotation":0.0,"id":230,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":35,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":228,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":209,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-54.0,-2.0],[-4.468037431535549,-2.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":294.53196256846445,"y":284.0,"rotation":0.0,"id":209,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":20,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#e6b8af","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555554,"y":0.0,"rotation":0.0,"id":210,"width":66.88888888888889,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"stopped
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":540.0,"y":300.0,"rotation":0.0,"id":191,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":192,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"start
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":510.0,"y":170.0,"rotation":0.0,"id":195,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":196,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"kill
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":872.5,"y":300.0,"rotation":0.0,"id":197,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":198,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"die
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":790.0,"y":100.0,"rotation":0.0,"id":199,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":200,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"stop
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":790.0,"y":450.0,"rotation":0.0,"id":205,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":206,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"unpause
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":670.0,"y":440.0,"rotation":0.0,"id":207,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":18,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":208,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"OOM
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":672.5,"y":284.0,"rotation":0.0,"id":211,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#b6d7a8","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555556,"y":0.0,"rotation":0.0,"id":212,"width":66.88888888888889,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"running
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":60.0,"y":375.0,"rotation":0.0,"id":215,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":26,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#b7b7b7","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555556,"y":0.0,"rotation":0.0,"id":216,"width":66.88888888888889,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"deleted
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":349.53196256846445,"y":420.0,"rotation":0.0,"id":227,"width":130.46803743153555,"height":116.23401871576777,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":32,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-6.765981284232225,"y":76.0,"rotation":45.0,"id":223,"width":80.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"Restart
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[]},{"x":57.234018715767775,"y":75.0,"rotation":315.0,"id":219,"width":80.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"Policy
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[]},{"x":14.734018715767775,"y":0.0,"rotation":0.0,"id":217,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.decision","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.diamond.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":218,"width":96.0,"height":27.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"Should restart?
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]}]},{"x":1027.5,"y":375.0,"rotation":0.0,"id":213,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":24,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#fce5cd","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555556,"y":0.0,"rotation":0.0,"id":214,"width":66.88888888888889,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"paused
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":872.5,"y":390.0,"rotation":0.0,"id":203,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":204,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"pause
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":510.0,"y":420.0,"rotation":0.0,"id":236,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":237,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"die
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":790.0,"y":170.0,"rotation":0.0,"id":248,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":48,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":249,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"die
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":670.0,"y":170.0,"rotation":0.0,"id":254,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":53,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":255,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"die
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":740.0,"y":323.0,"rotation":0.0,"id":250,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":50,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":248,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-10.0,-33.0],[87.5,-113.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":253,"width":73.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"docker stop
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":1027.5,"y":300.0,"rotation":0.0,"id":233,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":234,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"start
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":1027.5,"y":230.0,"rotation":0.0,"id":201,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":202,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"restart
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":1066.5,"y":298.0,"rotation":0.0,"id":264,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":264,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":233,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":201,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.5,2.0],[-1.5,-28.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":170.0,"y":299.0,"rotation":0.0,"id":228,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":229,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"create
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":170.0,"y":230.0,"rotation":0.0,"id":188,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":190,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"create
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":170.0,"y":390.0,"rotation":0.0,"id":193,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":194,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"destroy
","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]}],"shapeStyles":{"com.gliffy.shape.uml.uml_v2.state_machine":{"fill":"#e2e2e2","stroke":"#000000","strokeWidth":2},"com.gliffy.shape.flowchart.flowchart_v1.default":{"fill":"#b7b7b7","stroke":"#333333","strokeWidth":2}},"lineStyles":{"global":{"endArrow":1}},"textStyles":{"global":{"bold":true,"color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}}
\ No newline at end of file
diff --git a/docs/sources/reference/api/images/event_state.png b/docs/sources/reference/api/images/event_state.png
new file mode 100644
index 0000000000..aeeaca3478
Binary files /dev/null and b/docs/sources/reference/api/images/event_state.png differ
diff --git a/docs/sources/reference/api/remote_api_client_libraries.md b/docs/sources/reference/api/remote_api_client_libraries.md
index cbe8f3a328..7b237c2678 100644
--- a/docs/sources/reference/api/remote_api_client_libraries.md
+++ b/docs/sources/reference/api/remote_api_client_libraries.md
@@ -43,134 +43,140 @@ will add the libraries here.
Active |
+ Dart |
+ bwu_docker |
+ https://github.com/bwu-dart/bwu_docker |
+ Active |
+
+
Go |
go-dockerclient |
https://github.com/fsouza/go-dockerclient |
Active |
-
+
Go |
dockerclient |
https://github.com/samalba/dockerclient |
Active |
-
+
Groovy |
docker-client |
https://github.com/gesellix-docker/docker-client |
Active |
-
+
Haskell |
docker-hs |
https://github.com/denibertovic/docker-hs |
Active |
-
+
Java |
docker-java |
https://github.com/docker-java/docker-java |
Active |
-
+
Java |
docker-client |
https://github.com/spotify/docker-client |
Active |
-
+
Java |
jclouds-docker |
https://github.com/jclouds/jclouds-labs/tree/master/docker |
Active |
-
+
JavaScript (NodeJS) |
dockerode |
https://github.com/apocas/dockerode
Install via NPM: npm install dockerode |
Active |
-
+
JavaScript (NodeJS) |
docker.io |
https://github.com/appersonlabs/docker.io
Install via NPM: npm install docker.io |
Active |
-
+
JavaScript |
docker-js |
https://github.com/dgoujard/docker-js |
Outdated |
-
+
JavaScript (Angular) WebUI |
docker-cp |
https://github.com/13W/docker-cp |
Active |
-
+
JavaScript (Angular) WebUI |
dockerui |
https://github.com/crosbymichael/dockerui |
Active |
-
+
Perl |
Net::Docker |
https://metacpan.org/pod/Net::Docker |
Active |
-
+
Perl |
Eixo::Docker |
https://github.com/alambike/eixo-docker |
Active |
-
+
PHP |
Alvine |
http://pear.alvine.io/ (alpha) |
Active |
-
+
PHP |
Docker-PHP |
http://stage1.github.io/docker-php/ |
Active |
-
+
Python |
docker-py |
https://github.com/docker/docker-py |
Active |
-
+
Ruby |
docker-api |
https://github.com/swipely/docker-api |
Active |
-
+
Ruby |
docker-client |
https://github.com/geku/docker-client |
Outdated |
-
+
Rust |
docker-rust |
https://github.com/abh1nav/docker-rust |
Active |
-
+
Scala |
tugboat |
https://github.com/softprops/tugboat |
Active |
-
+
Scala |
reactive-docker |
https://github.com/almoehi/reactive-docker |
diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md
index 7dbe549237..fb4d5ce1d7 100644
--- a/docs/sources/reference/builder.md
+++ b/docs/sources/reference/builder.md
@@ -892,11 +892,11 @@ consider the following Dockerfile snippet:
FROM ubuntu
RUN mkdir /myvol
- RUN echo "hello world" > /myvol/greating
+ RUN echo "hello world" > /myvol/greeting
VOLUME /myvol
This Dockerfile results in an image that causes `docker run`, to
-create a new mount point at `/myvol` and copy the `greating` file
+create a new mount point at `/myvol` and copy the `greeting` file
into the newly created volume.
> **Note**:
diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md
index c69f0a170e..3d92b3ed67 100644
--- a/docs/sources/reference/commandline/cli.md
+++ b/docs/sources/reference/commandline/cli.md
@@ -149,7 +149,10 @@ expect an integer, and they can only be specified once.
--default-gateway-v6="" Container default gateway IPv6 address
--dns=[] DNS server to use
--dns-search=[] DNS search domains to use
+ --default-ulimit=[] Set default ulimit settings for containers
-e, --exec-driver="native" Exec driver to use
+ --exec-opt=[] Set exec driver options
+ --exec-root="/var/run/docker" Root of the Docker execdriver
--fixed-cidr="" IPv4 subnet for fixed IPs
--fixed-cidr-v6="" IPv6 subnet for fixed IPs
-G, --group="docker" Group for the unix socket
@@ -177,8 +180,8 @@ expect an integer, and they can only be specified once.
--tlscert="~/.docker/cert.pem" Path to TLS certificate file
--tlskey="~/.docker/key.pem" Path to TLS key file
--tlsverify=false Use TLS and verify the remote
+ --userland-proxy=true Use userland proxy for loopback traffic
-v, --version=false Print version information and quit
- --default-ulimit=[] Set default ulimit settings for containers.
Options with [] may be specified multiple times.
@@ -249,7 +252,7 @@ precedence over `HTTP_PROXY`.
### Daemon storage-driver option
The Docker daemon has support for several different image layer storage drivers: `aufs`,
-`devicemapper`, `btrfs` and `overlay`.
+`devicemapper`, `btrfs`, `zfs` and `overlay`.
The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that
is unlikely to be merged into the main kernel. These are also known to cause some
@@ -271,6 +274,11 @@ explains how to tune your existing setup without the use of options.
The `btrfs` driver is very fast for `docker build` - but like `devicemapper` does not
share executable memory between devices. Use `docker -d -s btrfs -g /mnt/btrfs_partition`.
+The `zfs` driver is probably not as fast as `btrfs` but has a longer track record
+on stability. Thanks to `Single Copy ARC` shared blocks between clones will be
+cached only once. Use `docker -d -s zfs`. To select a different zfs filesystem
+set `zfs.fsname` option as described in [Storage driver options](#storage-driver-options):
+
The `overlay` is a very fast union filesystem. It is now merged in the main
Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137).
Call `docker -d -s overlay` to use it.
@@ -281,10 +289,10 @@ Call `docker -d -s overlay` to use it.
#### Storage driver options
Particular storage-driver can be configured with options specified with
-`--storage-opt` flags. The only driver accepting options is `devicemapper` as
-of now. All its options are prefixed with `dm`.
+`--storage-opt` flags. Options for `devicemapper` are prefixed with `dm` and
+options for `zfs` start with `zfs`.
-Currently supported options are:
+Currently supported options of `devicemapper`:
* `dm.basesize`
@@ -443,6 +451,17 @@ Currently supported options are:
> daemon with a supported environment.
### Docker execdriver option
+Currently supported options of `zfs`:
+
+ * `zfs.fsname`
+
+ Set zfs filesystem under which docker will create its own datasets.
+ By default docker will pick up the zfs filesystem where docker graph
+ (`/var/lib/docker`) is located.
+
+ Example use:
+
+ $ docker -d -s zfs --storage-opt zfs.fsname=zroot/docker
The Docker daemon uses a specifically built `libcontainer` execution driver as its
interface to the Linux kernel `namespaces`, `cgroups`, and `SELinux`.
@@ -642,8 +661,9 @@ is returned by the `docker attach` command to its caller too:
-m, --memory="" Memory limit for all build containers
--memory-swap="" Total memory (memory + swap), `-1` to disable swap
-c, --cpu-shares CPU Shares (relative weight)
- --cpuset-cpus="" CPUs in which to allow execution, e.g. `0-3`, `0,1`
--cpuset-mems="" MEMs in which to allow execution, e.g. `0-3`, `0,1`
+ --cpuset-cpus="" CPUs in which to allow execution, e.g. `0-3`, `0,1`
+ --cgroup-parent="" Optional parent cgroup for the container
Builds Docker images from a Dockerfile and a "context". A build's context is
the files located in the specified `PATH` or `URL`. The build process can
@@ -652,13 +672,36 @@ an [*ADD*](/reference/builder/#add) instruction to reference a file in the
context.
The `URL` parameter can specify the location of a Git repository;
-the repository acts as the build context. The system recursively clones the repository
+the repository acts as the build context. The system recursively clones the repository
and its submodules using a `git clone --depth 1 --recursive` command.
This command runs in a temporary directory on your local host.
After the command succeeds, the directory is sent to the Docker daemon as the context.
Local clones give you the ability to access private repositories using
local user credentials, VPN's, and so forth.
+Git URLs accept context configuration in their fragment section, separated by a colon `:`.
+The first part represents the reference that Git will check out, this can be either
+a branch, a tag, or a commit SHA. The second part represents a subdirectory
+inside the repository that will be used as a build context.
+
+For example, run this command to use a directory called `docker` in the branch `container`:
+
+ $ docker build https://github.com/docker/rootfs.git#container:docker
+
+The following table represents all the valid suffixes with their build contexts:
+
+Build Syntax Suffix | Commit Used | Build Context Used
+--------------------|-------------|-------------------
+`myrepo.git` | `refs/heads/master` | `/`
+`myrepo.git#mytag` | `refs/tags/mytag` | `/`
+`myrepo.git#mybranch` | `refs/heads/mybranch` | `/`
+`myrepo.git#abcdef` | `sha1 = abcdef` | `/`
+`myrepo.git#:myfolder` | `refs/heads/master` | `/myfolder`
+`myrepo.git#master:myfolder` | `refs/heads/master` | `/myfolder`
+`myrepo.git#mytag:myfolder` | `refs/tags/mytag` | `/myfolder`
+`myrepo.git#mybranch:myfolder` | `refs/heads/mybranch` | `/myfolder`
+`myrepo.git#abcdef:myfolder` | `sha1 = abcdef` | `/myfolder`
+
Instead of specifying a context, you can pass a single Dockerfile in the
`URL` or pipe the file in via `STDIN`. To pipe a Dockerfile from `STDIN`:
@@ -839,6 +882,11 @@ you refer to it on the command line.
> children) for security reasons, and to ensure repeatable builds on remote
> Docker hosts. This is also the reason why `ADD ../file` will not work.
+When `docker build` is run with the `--cgroup-parent` option the containers used
+in the build will be run with the [corresponding `docker run`
+flag](/reference/run/#specifying-custom-cgroups).
+
+
## commit
Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]
@@ -874,7 +922,7 @@ Supported `Dockerfile` instructions:
197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours
$ docker commit c3f279d17e0a SvenDowideit/testimage:version3
f5283438590d
- $ docker images | head
+ $ docker images
REPOSITORY TAG ID CREATED VIRTUAL SIZE
SvenDowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB
@@ -912,6 +960,7 @@ Creates a new container.
-a, --attach=[] Attach to STDIN, STDOUT or STDERR
--add-host=[] Add a custom host-to-IP mapping (host:ip)
+ --blkio-weight=0 Block IO weight (relative weight)
-c, --cpu-shares=0 CPU shares (relative weight)
--cap-add=[] Add Linux capabilities
--cap-drop=[] Drop Linux capabilities
@@ -919,6 +968,7 @@ Creates a new container.
--cidfile="" Write the container ID to the file
--cpuset-cpus="" CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems="" Memory nodes (MEMs) in which to allow execution (0-3, 0,1)
+ --cpu-period=0 Limit the CPU CFS (Completely Fair Scheduler) period
--cpu-quota=0 Limit the CPU CFS (Completely Fair Scheduler) quota
--device=[] Add a host device to the container
--dns=[] Set custom DNS servers
@@ -939,8 +989,11 @@ Creates a new container.
--mac-address="" Container MAC address (e.g. 92:d0:c6:0a:29:33)
--name="" Assign a name to the container
--net="bridge" Set the Network mode for the container
+ --oom-kill-disable=false Whether to disable OOM Killer for the container or not
-P, --publish-all=false Publish all exposed ports to random ports
-p, --publish=[] Publish a container's port(s) to the host
+ --pid="" PID namespace to use
+ --uts="" UTS namespace to use
--privileged=false Give extended privileges to this container
--read-only=false Mount the container's root filesystem as read only
--restart="no" Restart policy (no, on-failure[:max-retry], always)
@@ -1148,7 +1201,6 @@ You'll need two shells for this example.
-d, --detach=false Detached mode: run command in the background
-i, --interactive=false Keep STDIN open even if not attached
- --privileged=false Give extended privileges to the command
-t, --tty=false Allocate a pseudo-TTY
-u, --user= Username or UID (format: [:])
@@ -1273,7 +1325,7 @@ uses up the `VIRTUAL SIZE` listed only once.
#### Listing the most recently created images
- $ docker images | head
+ $ docker images
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
77af4d6b9913 19 hours ago 1.089 GB
committ latest b6fa739cedf5 19 hours ago 1.089 GB
@@ -1288,7 +1340,7 @@ uses up the `VIRTUAL SIZE` listed only once.
#### Listing the full length image IDs
- $ docker images --no-trunc | head
+ $ docker images --no-trunc
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB
committest latest b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB
@@ -1307,7 +1359,7 @@ called a `digest`. As long as the input used to generate the image is
unchanged, the digest value is predictable. To list image digest values, use
the `--digests` flag:
- $ docker images --digests | head
+ $ docker images --digests
REPOSITORY TAG DIGEST IMAGE ID CREATED VIRTUAL SIZE
localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB
@@ -1583,6 +1635,7 @@ For example:
Fetch the logs of a container
-f, --follow=false Follow log output
+ --since="" Show logs since timestamp
-t, --timestamps=false Show timestamps
--tail="all" Number of lines to show from the end of the logs
@@ -1602,6 +1655,10 @@ timestamp, for example `2014-09-16T06:17:46.000000000Z`, to each
log entry. To ensure that the timestamps for are aligned the
nano-second part of the timestamp will be padded with zero when necessary.
+The `--since` option shows logs of a container generated only after
+the given date, specified as RFC 3339 or UNIX timestamp. The `--since` option
+can be combined with the `--follow` and `--tail` options.
+
## pause
Usage: docker pause CONTAINER [CONTAINER...]
@@ -1868,12 +1925,14 @@ To remove an image using its digest:
-a, --attach=[] Attach to STDIN, STDOUT or STDERR
--add-host=[] Add a custom host-to-IP mapping (host:ip)
+ --blkio-weight=0 Block IO weight (relative weight)
-c, --cpu-shares=0 CPU shares (relative weight)
--cap-add=[] Add Linux capabilities
--cap-drop=[] Drop Linux capabilities
--cidfile="" Write the container ID to the file
--cpuset-cpus="" CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems="" Memory nodes (MEMs) in which to allow execution (0-3, 0,1)
+ --cpu-period=0 Limit the CPU CFS (Completely Fair Scheduler) period
--cpu-quota=0 Limit the CPU CFS (Completely Fair Scheduler) quota
-d, --detach=false Run container in background and print container ID
--device=[] Add a host device to the container
@@ -1897,9 +1956,11 @@ To remove an image using its digest:
--memory-swap="" Total memory (memory + swap), '-1' to disable swap
--name="" Assign a name to the container
--net="bridge" Set the Network mode for the container
+ --oom-kill-disable=false Whether to disable OOM Killer for the container or not
-P, --publish-all=false Publish all exposed ports to random ports
-p, --publish=[] Publish a container's port(s) to the host
--pid="" PID namespace to use
+ --uts="" UTS namespace to use
--privileged=false Give extended privileges to this container
--read-only=false Mount the container's root filesystem as read only
--restart="no" Restart policy (no, on-failure[:max-retry], always)
@@ -2102,6 +2163,12 @@ Guide.
The `--link` flag will link the container named `/redis` into the newly
created container with the alias `redis`. The new container can access the
network and environment of the `redis` container via environment variables.
+The `--link` flag will also just accept the form `` in which case
+the alias will match the name. For instance, you could have written the previous
+example as:
+
+ $ docker run --link redis --name console ubuntu bash
+
The `--name` flag will assign the name `console` to the newly created
container.
@@ -2136,10 +2203,10 @@ logs could be retrieved using `docker logs`. This is
useful if you need to pipe a file or something else into a container and
retrieve the container's ID once the container has finished running.
- $ docker run --device=/dev/sdc:/dev/xvdc --device=/dev/sdd --device=/dev/zero:/dev/nulo -i -t ubuntu ls -l /dev/{xvdc,sdd,nulo}
- brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc
- brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd
- crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/nulo
+ $ docker run --device=/dev/sdc:/dev/xvdc --device=/dev/sdd --device=/dev/zero:/dev/nulo -i -t ubuntu ls -l /dev/{xvdc,sdd,nulo}
+ brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc
+ brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd
+ crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/nulo
It is often necessary to directly expose devices to a container. The `--device`
option enables that. For example, a specific block storage device or loop
@@ -2332,7 +2399,7 @@ It is used to create a backup that can then be used with `docker load`
It is even useful to cherry-pick particular tags of an image repository
- $ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy
+ $ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy
## search
@@ -2369,6 +2436,7 @@ more details on finding shared images from the command line.
Display a live stream of one or more containers' resource usage statistics
--help=false Print usage
+ --no-stream=false Disable streaming stats and only pull the first result
Running `docker stats` on multiple containers
diff --git a/docs/sources/reference/logging/journald.md b/docs/sources/reference/logging/journald.md
new file mode 100644
index 0000000000..9c025bfe62
--- /dev/null
+++ b/docs/sources/reference/logging/journald.md
@@ -0,0 +1,66 @@
+# Journald logging driver
+
+The `journald` logging driver sends container logs to the [systemd
+journal](http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html). Log entries can be retrieved using the `journalctl`
+command or through use of the journal API.
+
+In addition to the text of the log message itself, the `journald` log
+driver stores the following metadata in the journal with each message:
+
+| Field | Description |
+----------------------|-------------|
+| `CONTAINER_ID` | The container ID truncated to 12 characters. |
+| `CONTAINER_ID_FULL` | The full 64-character container ID. |
+| `CONTAINER_NAME` | The container name at the time it was started. If you use `docker rename` to rename a container, the new name is not reflected in the journal entries. |
+
+## Usage
+
+You can configure the default logging driver by passing the
+`--log-driver` option to the Docker daemon:
+
+ docker --log-driver=journald
+
+You can set the logging driver for a specific container by using the
+`--log-driver` option to `docker run`:
+
+ docker run --log-driver=journald ...
+
+## Note regarding container names
+
+The value logged in the `CONTAINER_NAME` field is the container name
+that was set at startup. If you use `docker rename` to rename a
+container, the new name will not be reflected in the journal entries.
+Journal entries will continue to use the original name.
+
+## Retrieving log messages with journalctl
+
+You can use the `journalctl` command to retrieve log messages. You
+can apply filter expressions to limit the retrieved messages to a
+specific container. For example, to retrieve all log messages from a
+container referenced by name:
+
+ # journalctl CONTAINER_NAME=webserver
+
+You can make use of additional filters to further limit the messages
+retrieved. For example, to see just those messages generated since
+the system last booted:
+
+ # journalctl -b CONTAINER_NAME=webserver
+
+Or to retrieve log messages in JSON format with complete metadata:
+
+ # journalctl -o json CONTAINER_NAME=webserver
+
+## Retrieving log messages with the journal API
+
+This example uses the `systemd` Python module to retrieve container
+logs:
+
+ import systemd.journal
+
+ reader = systemd.journal.Reader()
+ reader.add_match('CONTAINER_NAME=web')
+
+ for msg in reader:
+ print '{CONTAINER_ID_FULL}: {MESSAGE}'.format(**msg)
+
diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md
index 990faaf6c0..7c5113f6de 100644
--- a/docs/sources/reference/run.md
+++ b/docs/sources/reference/run.md
@@ -157,6 +157,7 @@ called a digest. As long as the input used to generate the image is unchanged,
the digest value is predictable and referenceable.
## PID settings (--pid)
+
--pid="" : Set the PID (Process) Namespace mode for the container,
'host': use the host's PID namespace inside the container
@@ -177,6 +178,23 @@ within the container.
This command would allow you to use `strace` inside the container on pid 1234 on
the host.
+## UTS settings (--uts)
+
+ --uts="" : Set the UTS namespace mode for the container,
+ 'host': use the host's UTS namespace inside the container
+
+The UTS namespace is for setting the hostname and the domain that is visible
+to running processes in that namespace. By default, all containers, including
+those with `--net=host`, have their own UTS namespace. The `host` setting will
+result in the container using the same UTS namespace as the host.
+
+You may wish to share the UTS namespace with the host if you would like the
+hostname of the container to change as the hostname of the host changes. A
+more advanced use case would be changing the host's hostname from a container.
+
+> **Note**: `--uts="host"` gives the container full access to change the
+> hostname of the host and is therefore considered insecure.
+
## IPC settings (--ipc)
--ipc="" : Set the IPC mode for the container,
@@ -216,9 +234,9 @@ networking. In cases like this, you would perform I/O through files or
Your container will use the same DNS servers as the host by default, but
you can override this with `--dns`.
-By default a random MAC is generated. You can set the container's MAC address
-explicitly by providing a MAC via the `--mac-address` parameter (format:
-`12:34:56:78:9a:bc`).
+By default, the MAC address is generated using the IP address allocated to the
+container. You can set the container's MAC address explicitly by providing a
+MAC address via the `--mac-address` parameter (format:`12:34:56:78:9a:bc`).
Supported networking modes are:
@@ -282,7 +300,8 @@ With the networking mode set to `host` a container will share the host's
network stack and all interfaces from the host will be available to the
container. The container's hostname will match the hostname on the host
system. Publishing ports and linking to other containers will not work
-when sharing the host's network stack.
+when sharing the host's network stack. Note that `--add-host` `--hostname`
+`--dns` `--dns-search` and `--mac-address` is invalid in `host` netmode.
Compared to the default `bridge` mode, the `host` mode gives *significantly*
better networking performance since it uses the host's native networking stack
@@ -298,7 +317,9 @@ or a High Performance Web Server.
With the networking mode set to `container` a container will share the
network stack of another container. The other container's name must be
-provided in the format of `--net container:`.
+provided in the format of `--net container:`. Note that `--add-host`
+`--hostname` `--dns` `--dns-search` and `--mac-address` is invalid
+in `container` netmode.
Example running a Redis container with Redis binding to `localhost` then
running the `redis-cli` command and connecting to the Redis server over the
@@ -465,6 +486,13 @@ Note:
You would have to write policy defining a `svirt_apache_t` type.
+## Specifying custom cgroups
+
+Using the `--cgroup-parent` flag, you can pass a specific cgroup to run a
+container in. This allows you to create and manage cgroups on their own. You can
+define custom resources for those cgroups and put containers under a common
+parent group.
+
## Runtime constraints on resources
The operator can also adjust the performance parameters of the
@@ -473,9 +501,12 @@ container:
-m, --memory="": Memory limit (format: , where unit = b, k, m or g)
-memory-swap="": Total memory limit (memory + swap, format: , where unit = b, k, m or g)
-c, --cpu-shares=0: CPU shares (relative weight)
+ --cpu-period=0: Limit the CPU CFS (Completely Fair Scheduler) period
--cpuset-cpus="": CPUs in which to allow execution (0-3, 0,1)
--cpuset-mems="": Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
--cpu-quota=0: Limit the CPU CFS (Completely Fair Scheduler) quota
+ --blkio-weight=0: Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+ --oom-kill-disable=true|false: Whether to disable OOM Killer for the container or not.
### Memory constraints
@@ -552,6 +583,27 @@ would be 2*300M, so processes can use 300M swap memory as well.
We set both memory and swap memory, so the processes in the container can use
300M memory and 700M swap memory.
+By default, Docker kills processes in a container if an out-of-memory (OOM)
+error occurs. To change this behaviour, use the `--oom-kill-disable` option.
+Only disable the OOM killer on containers where you have also set the
+`-m/--memory` option. If the `-m` flag is not set, this can result in the host
+running out of memory and require killing the host's system processes to free
+memory.
+
+Examples:
+
+The following example limits the memory to 100M and disables the OOM killer for
+this container:
+
+ $ docker run -ti -m 100M --oom-kill-disable ubuntu:14.04 /bin/bash
+
+The following example, illustrates a dangerous way to use the flag:
+
+ $ docker run -ti --oom-kill-disable ubuntu:14.04 /bin/bash
+
+The container has unlimited memory which can cause the host to run out memory
+and require killing system processes to free memory.
+
### CPU share constraint
By default, all containers get the same proportion of CPU cycles. This proportion
@@ -587,6 +639,20 @@ division of CPU shares:
101 {C1} 1 100% of CPU1
102 {C1} 2 100% of CPU2
+### CPU period constraint
+
+The default CPU CFS (Completely Fair Scheduler) period is 100ms. We can use
+`--cpu-period` to set the period of CPUs to limit the container's CPU usage.
+And usually `--cpu-period` should work with `--cpu-quota`.
+
+Examples:
+
+ $ docker run -ti --cpu-period=50000 --cpu-quota=25000 ubuntu:14.04 /bin/bash
+
+If there is 1 CPU, this means the container can get 50% CPU worth of run-time every 50ms.
+
+For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt).
+
### Cpuset constraint
We can set cpus in which to allow execution for containers.
@@ -625,6 +691,30 @@ Linux Scheduler used by the kernel. Set this value to 50000 to limit the contain
to 50% of a CPU resource. For multiple CPUs, adjust the `--cpu-quota` as necessary.
For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt).
+### Block IO bandwidth (Blkio) constraint
+
+By default, all containers get the same proportion of block IO bandwidth
+(blkio). This proportion is 500. To modify this proportion, change the
+container's blkio weight relative to the weighting of all other running
+containers using the `--blkio-weight` flag.
+
+The `--blkio-weight` flag can set the weighting to a value between 10 to 1000.
+For example, the commands below create two containers with different blkio
+weight:
+
+ $ docker run -ti --name c1 --blkio-weight 300 ubuntu:14.04 /bin/bash
+ $ docker run -ti --name c2 --blkio-weight 600 ubuntu:14.04 /bin/bash
+
+If you do block IO in the two containers at the same time, by, for example:
+
+ $ time dd if=/mnt/zerofile of=test.out bs=1M count=1024 oflag=direct
+
+You'll find that the proportion of time is the same as the proportion of blkio
+weights of the two containers.
+
+> **Note:** The blkio weight setting is only available for direct IO. Buffered IO
+> is not currently supported.
+
## Runtime privilege, Linux capabilities, and LXC configuration
--cap-add: Add Linux capabilities
@@ -790,7 +880,11 @@ command is not available for this logging driver
#### Logging driver: journald
-Journald logging driver for Docker. Writes log messages to journald. `docker logs` command is not available for this logging driver
+Journald logging driver for Docker. Writes log messages to journald; the container id will be stored in the journal's `CONTAINER_ID` field. `docker logs` command is not available for this logging driver. For detailed information on working with this logging driver, see [the journald logging driver](reference/logging/journald) reference documentation.
+
+#### Log Opts :
+
+Logging options for configuring a log driver. The following log options are supported: [none]
## Overriding Dockerfile image defaults
@@ -866,7 +960,7 @@ or override the Dockerfile's exposed defaults:
Both hostPort and containerPort can be specified as a range of ports.
When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. (e.g., `-p 1234-1236:1234-1236/tcp`)
(use 'docker port' to see the actual mapping)
- --link="" : Add link to another container (:alias)
+ --link="" : Add link to another container (:alias or )
As mentioned previously, `EXPOSE` (and `--expose`) makes ports available
**in** a container for incoming connections. The port number on the
diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md
index 1a32cbb980..f01d783850 100644
--- a/docs/sources/release-notes.md
+++ b/docs/sources/release-notes.md
@@ -131,3 +131,6 @@ handled centrally by Machine instead of letting each driver individually do it.
host’s TLS certificates for good security practice and for if a host’s IP
address changes.
+## Docker Hub Enterprise & Commercially Supported Docker Engine
+
+See the [DHE and CS Docker Engine release notes](docker-hub-enterprise/release-notes.md).
diff --git a/docs/sources/userguide/dockerimages.md b/docs/sources/userguide/dockerimages.md
index c29b01032c..d2305152ef 100644
--- a/docs/sources/userguide/dockerimages.md
+++ b/docs/sources/userguide/dockerimages.md
@@ -54,6 +54,13 @@ We can see three crucial pieces of information about our images in the listing.
* The tags for each image, for example `14.04`.
* The image ID of each image.
+> **Note:**
+> Previously, the `docker images` command supported the `--tree` and `--dot`
+> arguments, which displayed different visualizations of the image data. Docker
+> core removed this functionality in the 1.7 version. If you liked this
+> functionality, you can still find it in
+> [the third-party dockviz tool](https://github.com/justone/dockviz).
+
A repository potentially holds multiple variants of an image. In the case of
our `ubuntu` image we can see multiple variants covering Ubuntu 10.04, 12.04,
12.10, 13.04, 13.10 and 14.04. Each variant is identified by a tag and you can
diff --git a/docs/sources/userguide/dockerlinks.md b/docs/sources/userguide/dockerlinks.md
index 8a20388463..2b7d30cd44 100644
--- a/docs/sources/userguide/dockerlinks.md
+++ b/docs/sources/userguide/dockerlinks.md
@@ -114,7 +114,7 @@ You can also use `docker inspect` to return the container's name.
$ docker inspect -f "{{ .Name }}" aed84ee21bde
/web
-> **Note:**
+> **Note:**
> Container names have to be unique. That means you can only call
> one container `web`. If you want to re-use a container name you must delete
> the old container (with `docker rm`) before you can create a new
@@ -151,6 +151,14 @@ earlier. The `--link` flag takes the form:
Where `name` is the name of the container we're linking to and `alias` is an
alias for the link name. You'll see how that alias gets used shortly.
+The `--link` flag also takes the form:
+
+ --link
+
+In which case the alias will match the name. You could have written the previous
+example as:
+
+ $ docker run -d -P --name web --link db training/webapp python app.py
Next, inspect your linked containers with `docker inspect`:
diff --git a/docs/sources/userguide/labels-custom-metadata.md b/docs/sources/userguide/labels-custom-metadata.md
index 792c2f505d..79ac42ebf4 100644
--- a/docs/sources/userguide/labels-custom-metadata.md
+++ b/docs/sources/userguide/labels-custom-metadata.md
@@ -129,10 +129,14 @@ You can view the labels via the `docker inspect` command:
}
...
- $ docker inspect -f "{{json .Labels }}" 4fa6e0f0c678
+ # Inspect labels on container
+ $ docker inspect -f "{{json .Config.Labels }}" 4fa6e0f0c678
{"Vendor":"ACME Incorporated","com.example.is-beta":"","com.example.version":"0.0.1-beta","com.example.release-date":"2015-02-12"}
+ # Inspect labels on images
+ $ docker inspect -f "{{json .ContainerConfig.Labels }}" myimage
+
## Query labels
diff --git a/docs/sources/userguide/usingdocker.md b/docs/sources/userguide/usingdocker.md
index e33ca717d6..2c73929c9d 100644
--- a/docs/sources/userguide/usingdocker.md
+++ b/docs/sources/userguide/usingdocker.md
@@ -49,47 +49,29 @@ language powering Docker).
Last stable version: 0.8.0
-### Seeing what the Docker client can do
+## Get Docker command help
-We can see all of the commands available to us with the Docker client by
-running the `docker` binary without any options.
+You can display the help for specific Docker commands. The help details the
+options and their usage. To see a list of all the possible commands, use the
+following:
- $ docker
+ $ docker --help
-You will see a list of all currently available commands.
-
- Commands:
- attach Attach to a running container
- build Build an image from a Dockerfile
- commit Create a new image from a container's changes
- . . .
-
-### Seeing Docker command usage
-
-You can also zoom in and review the usage for specific Docker commands.
-
-Try typing Docker followed with a `[command]` to see the usage for that
-command:
-
- $ docker attach
- Help output . . .
-
-Or you can also pass the `--help` flag to the `docker` binary.
+To see usage for a specific command, specify the command with the `--help` flag:
$ docker attach --help
-This will display the help text and all available flags:
-
Usage: docker attach [OPTIONS] CONTAINER
Attach to a running container
- --no-stdin=false: Do not attach stdin
- --sig-proxy=true: Proxify all received signal to the process (non-TTY mode only)
+ --help=false Print usage
+ --no-stdin=false Do not attach stdin
+ --sig-proxy=true Proxy all received signals to the process
> **Note:**
-> You can see a full list of Docker's commands
-> [here](/reference/commandline/cli/).
+> For further details and examples of each command, see the
+> [command reference](/reference/commandline/cli/) in this guide.
## Running a web application in Docker
diff --git a/engine/engine.go b/engine/engine.go
deleted file mode 100644
index 79fae51cc3..0000000000
--- a/engine/engine.go
+++ /dev/null
@@ -1,255 +0,0 @@
-package engine
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "sort"
- "strings"
- "sync"
- "time"
-
- "github.com/docker/docker/pkg/ioutils"
- "github.com/docker/docker/pkg/stringid"
-)
-
-// Installer is a standard interface for objects which can "install" themselves
-// on an engine by registering handlers.
-// This can be used as an entrypoint for external plugins etc.
-type Installer interface {
- Install(*Engine) error
-}
-
-type Handler func(*Job) error
-
-var globalHandlers map[string]Handler
-
-func init() {
- globalHandlers = make(map[string]Handler)
-}
-
-func Register(name string, handler Handler) error {
- _, exists := globalHandlers[name]
- if exists {
- return fmt.Errorf("Can't overwrite global handler for command %s", name)
- }
- globalHandlers[name] = handler
- return nil
-}
-
-func unregister(name string) {
- delete(globalHandlers, name)
-}
-
-// The Engine is the core of Docker.
-// It acts as a store for *containers*, and allows manipulation of these
-// containers by executing *jobs*.
-type Engine struct {
- handlers map[string]Handler
- catchall Handler
- hack Hack // data for temporary hackery (see hack.go)
- id string
- Stdout io.Writer
- Stderr io.Writer
- Stdin io.Reader
- Logging bool
- tasks sync.WaitGroup
- l sync.RWMutex // lock for shutdown
- shutdownWait sync.WaitGroup
- shutdown bool
- onShutdown []func() // shutdown handlers
-}
-
-func (eng *Engine) Register(name string, handler Handler) error {
- _, exists := eng.handlers[name]
- if exists {
- return fmt.Errorf("Can't overwrite handler for command %s", name)
- }
- eng.handlers[name] = handler
- return nil
-}
-
-func (eng *Engine) RegisterCatchall(catchall Handler) {
- eng.catchall = catchall
-}
-
-// New initializes a new engine.
-func New() *Engine {
- eng := &Engine{
- handlers: make(map[string]Handler),
- id: stringid.GenerateRandomID(),
- Stdout: os.Stdout,
- Stderr: os.Stderr,
- Stdin: os.Stdin,
- Logging: true,
- }
- eng.Register("commands", func(job *Job) error {
- for _, name := range eng.commands() {
- job.Printf("%s\n", name)
- }
- return nil
- })
- // Copy existing global handlers
- for k, v := range globalHandlers {
- eng.handlers[k] = v
- }
- return eng
-}
-
-func (eng *Engine) String() string {
- return fmt.Sprintf("%s", eng.id[:8])
-}
-
-// Commands returns a list of all currently registered commands,
-// sorted alphabetically.
-func (eng *Engine) commands() []string {
- names := make([]string, 0, len(eng.handlers))
- for name := range eng.handlers {
- names = append(names, name)
- }
- sort.Strings(names)
- return names
-}
-
-// Job creates a new job which can later be executed.
-// This function mimics `Command` from the standard os/exec package.
-func (eng *Engine) Job(name string, args ...string) *Job {
- job := &Job{
- Eng: eng,
- Name: name,
- Args: args,
- Stdin: NewInput(),
- Stdout: NewOutput(),
- Stderr: NewOutput(),
- env: &Env{},
- closeIO: true,
-
- cancelled: make(chan struct{}),
- }
- if eng.Logging {
- job.Stderr.Add(ioutils.NopWriteCloser(eng.Stderr))
- }
-
- // Catchall is shadowed by specific Register.
- if handler, exists := eng.handlers[name]; exists {
- job.handler = handler
- } else if eng.catchall != nil && name != "" {
- // empty job names are illegal, catchall or not.
- job.handler = eng.catchall
- }
- return job
-}
-
-// OnShutdown registers a new callback to be called by Shutdown.
-// This is typically used by services to perform cleanup.
-func (eng *Engine) OnShutdown(h func()) {
- eng.l.Lock()
- eng.onShutdown = append(eng.onShutdown, h)
- eng.shutdownWait.Add(1)
- eng.l.Unlock()
-}
-
-// Shutdown permanently shuts down eng as follows:
-// - It refuses all new jobs, permanently.
-// - It waits for all active jobs to complete (with no timeout)
-// - It calls all shutdown handlers concurrently (if any)
-// - It returns when all handlers complete, or after 15 seconds,
-// whichever happens first.
-func (eng *Engine) Shutdown() {
- eng.l.Lock()
- if eng.shutdown {
- eng.l.Unlock()
- eng.shutdownWait.Wait()
- return
- }
- eng.shutdown = true
- eng.l.Unlock()
- // We don't need to protect the rest with a lock, to allow
- // for other calls to immediately fail with "shutdown" instead
- // of hanging for 15 seconds.
- // This requires all concurrent calls to check for shutdown, otherwise
- // it might cause a race.
-
- // Wait for all jobs to complete.
- // Timeout after 5 seconds.
- tasksDone := make(chan struct{})
- go func() {
- eng.tasks.Wait()
- close(tasksDone)
- }()
- select {
- case <-time.After(time.Second * 5):
- case <-tasksDone:
- }
-
- // Call shutdown handlers, if any.
- // Timeout after 10 seconds.
- for _, h := range eng.onShutdown {
- go func(h func()) {
- h()
- eng.shutdownWait.Done()
- }(h)
- }
- done := make(chan struct{})
- go func() {
- eng.shutdownWait.Wait()
- close(done)
- }()
- select {
- case <-time.After(time.Second * 10):
- case <-done:
- }
- return
-}
-
-// IsShutdown returns true if the engine is in the process
-// of shutting down, or already shut down.
-// Otherwise it returns false.
-func (eng *Engine) IsShutdown() bool {
- eng.l.RLock()
- defer eng.l.RUnlock()
- return eng.shutdown
-}
-
-// ParseJob creates a new job from a text description using a shell-like syntax.
-//
-// The following syntax is used to parse `input`:
-//
-// * Words are separated using standard whitespaces as separators.
-// * Quotes and backslashes are not interpreted.
-// * Words of the form 'KEY=[VALUE]' are added to the job environment.
-// * All other words are added to the job arguments.
-//
-// For example:
-//
-// job, _ := eng.ParseJob("VERBOSE=1 echo hello TEST=true world")
-//
-// The resulting job will have:
-// job.Args={"echo", "hello", "world"}
-// job.Env={"VERBOSE":"1", "TEST":"true"}
-//
-func (eng *Engine) ParseJob(input string) (*Job, error) {
- // FIXME: use a full-featured command parser
- scanner := bufio.NewScanner(strings.NewReader(input))
- scanner.Split(bufio.ScanWords)
- var (
- cmd []string
- env Env
- )
- for scanner.Scan() {
- word := scanner.Text()
- kv := strings.SplitN(word, "=", 2)
- if len(kv) == 2 {
- env.Set(kv[0], kv[1])
- } else {
- cmd = append(cmd, word)
- }
- }
- if len(cmd) == 0 {
- return nil, fmt.Errorf("empty command: '%s'", input)
- }
- job := eng.Job(cmd[0], cmd[1:]...)
- job.Env().Init(&env)
- return job, nil
-}
diff --git a/engine/engine_test.go b/engine/engine_test.go
deleted file mode 100644
index a6ff62c8be..0000000000
--- a/engine/engine_test.go
+++ /dev/null
@@ -1,236 +0,0 @@
-package engine
-
-import (
- "bytes"
- "strings"
- "testing"
-
- "github.com/docker/docker/pkg/ioutils"
-)
-
-func TestRegister(t *testing.T) {
- if err := Register("dummy1", nil); err != nil {
- t.Fatal(err)
- }
-
- if err := Register("dummy1", nil); err == nil {
- t.Fatalf("Expecting error, got none")
- }
- // Register is global so let's cleanup to avoid conflicts
- defer unregister("dummy1")
-
- eng := New()
-
- //Should fail because global handlers are copied
- //at the engine creation
- if err := eng.Register("dummy1", nil); err == nil {
- t.Fatalf("Expecting error, got none")
- }
-
- if err := eng.Register("dummy2", nil); err != nil {
- t.Fatal(err)
- }
-
- if err := eng.Register("dummy2", nil); err == nil {
- t.Fatalf("Expecting error, got none")
- }
- defer unregister("dummy2")
-}
-
-func TestJob(t *testing.T) {
- eng := New()
- job1 := eng.Job("dummy1", "--level=awesome")
-
- if job1.handler != nil {
- t.Fatalf("job1.handler should be empty")
- }
-
- h := func(j *Job) error {
- j.Printf("%s\n", j.Name)
- return nil
- }
-
- eng.Register("dummy2", h)
- defer unregister("dummy2")
- job2 := eng.Job("dummy2", "--level=awesome")
-
- if job2.handler == nil {
- t.Fatalf("job2.handler shouldn't be nil")
- }
-
- if job2.handler(job2) != nil {
- t.Fatalf("handler dummy2 was not found in job2")
- }
-}
-
-func TestEngineShutdown(t *testing.T) {
- eng := New()
- if eng.IsShutdown() {
- t.Fatalf("Engine should not show as shutdown")
- }
- eng.Shutdown()
- if !eng.IsShutdown() {
- t.Fatalf("Engine should show as shutdown")
- }
-}
-
-func TestEngineCommands(t *testing.T) {
- eng := New()
- handler := func(job *Job) error { return nil }
- eng.Register("foo", handler)
- eng.Register("bar", handler)
- eng.Register("echo", handler)
- eng.Register("die", handler)
- var output bytes.Buffer
- commands := eng.Job("commands")
- commands.Stdout.Add(&output)
- commands.Run()
- expected := "bar\ncommands\ndie\necho\nfoo\n"
- if result := output.String(); result != expected {
- t.Fatalf("Unexpected output:\nExpected = %v\nResult = %v\n", expected, result)
- }
-}
-
-func TestEngineString(t *testing.T) {
- eng1 := New()
- eng2 := New()
- s1 := eng1.String()
- s2 := eng2.String()
- if eng1 == eng2 {
- t.Fatalf("Different engines should have different names (%v == %v)", s1, s2)
- }
-}
-
-func TestParseJob(t *testing.T) {
- eng := New()
- // Verify that the resulting job calls to the right place
- var called bool
- eng.Register("echo", func(job *Job) error {
- called = true
- return nil
- })
- input := "echo DEBUG=1 hello world VERBOSITY=42"
- job, err := eng.ParseJob(input)
- if err != nil {
- t.Fatal(err)
- }
- if job.Name != "echo" {
- t.Fatalf("Invalid job name: %v", job.Name)
- }
- if strings.Join(job.Args, ":::") != "hello:::world" {
- t.Fatalf("Invalid job args: %v", job.Args)
- }
- if job.Env().Get("DEBUG") != "1" {
- t.Fatalf("Invalid job env: %v", job.Env)
- }
- if job.Env().Get("VERBOSITY") != "42" {
- t.Fatalf("Invalid job env: %v", job.Env)
- }
- if len(job.Env().Map()) != 2 {
- t.Fatalf("Invalid job env: %v", job.Env)
- }
- if err := job.Run(); err != nil {
- t.Fatal(err)
- }
- if !called {
- t.Fatalf("Job was not called")
- }
-}
-
-func TestCatchallEmptyName(t *testing.T) {
- eng := New()
- var called bool
- eng.RegisterCatchall(func(job *Job) error {
- called = true
- return nil
- })
- err := eng.Job("").Run()
- if err == nil {
- t.Fatalf("Engine.Job(\"\").Run() should return an error")
- }
- if called {
- t.Fatalf("Engine.Job(\"\").Run() should return an error")
- }
-}
-
-// Ensure that a job within a job both using the same underlying standard
-// output writer does not close the output of the outer job when the inner
-// job's stdout is wrapped with a NopCloser. When not wrapped, it should
-// close the outer job's output.
-func TestNestedJobSharedOutput(t *testing.T) {
- var (
- outerHandler Handler
- innerHandler Handler
- wrapOutput bool
- )
-
- outerHandler = func(job *Job) error {
- job.Stdout.Write([]byte("outer1"))
-
- innerJob := job.Eng.Job("innerJob")
-
- if wrapOutput {
- innerJob.Stdout.Add(ioutils.NopWriteCloser(job.Stdout))
- } else {
- innerJob.Stdout.Add(job.Stdout)
- }
-
- if err := innerJob.Run(); err != nil {
- t.Fatal(err)
- }
-
- // If wrapOutput was *false* this write will do nothing.
- // FIXME (jlhawn): It should cause an error to write to
- // closed output.
- job.Stdout.Write([]byte(" outer2"))
-
- return nil
- }
-
- innerHandler = func(job *Job) error {
- job.Stdout.Write([]byte(" inner"))
-
- return nil
- }
-
- eng := New()
- eng.Register("outerJob", outerHandler)
- eng.Register("innerJob", innerHandler)
-
- // wrapOutput starts *false* so the expected
- // output of running the outer job will be:
- //
- // "outer1 inner"
- //
- outBuf := new(bytes.Buffer)
- outerJob := eng.Job("outerJob")
- outerJob.Stdout.Add(outBuf)
-
- if err := outerJob.Run(); err != nil {
- t.Fatal(err)
- }
-
- expectedOutput := "outer1 inner"
- if outBuf.String() != expectedOutput {
- t.Fatalf("expected job output to be %q, got %q", expectedOutput, outBuf.String())
- }
-
- // Set wrapOutput to true so that the expected
- // output of running the outer job will be:
- //
- // "outer1 inner outer2"
- //
- wrapOutput = true
- outBuf.Reset()
- outerJob = eng.Job("outerJob")
- outerJob.Stdout.Add(outBuf)
-
- if err := outerJob.Run(); err != nil {
- t.Fatal(err)
- }
-
- expectedOutput = "outer1 inner outer2"
- if outBuf.String() != expectedOutput {
- t.Fatalf("expected job output to be %q, got %q", expectedOutput, outBuf.String())
- }
-}
diff --git a/engine/env.go b/engine/env.go
deleted file mode 100644
index 107ae4a0d9..0000000000
--- a/engine/env.go
+++ /dev/null
@@ -1,313 +0,0 @@
-package engine
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "strconv"
- "strings"
- "time"
-
- "github.com/docker/docker/pkg/ioutils"
-)
-
-type Env []string
-
-// Get returns the last value associated with the given key. If there are no
-// values associated with the key, Get returns the empty string.
-func (env *Env) Get(key string) (value string) {
- // not using Map() because of the extra allocations https://github.com/docker/docker/pull/7488#issuecomment-51638315
- for _, kv := range *env {
- if strings.Index(kv, "=") == -1 {
- continue
- }
- parts := strings.SplitN(kv, "=", 2)
- if parts[0] != key {
- continue
- }
- if len(parts) < 2 {
- value = ""
- } else {
- value = parts[1]
- }
- }
- return
-}
-
-func (env *Env) Exists(key string) bool {
- _, exists := env.Map()[key]
- return exists
-}
-
-// Len returns the number of keys in the environment.
-// Note that len(env) might be different from env.Len(),
-// because the same key might be set multiple times.
-func (env *Env) Len() int {
- return len(env.Map())
-}
-
-func (env *Env) Init(src *Env) {
- (*env) = make([]string, 0, len(*src))
- for _, val := range *src {
- (*env) = append((*env), val)
- }
-}
-
-func (env *Env) GetBool(key string) (value bool) {
- s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
- if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
- return false
- }
- return true
-}
-
-func (env *Env) SetBool(key string, value bool) {
- if value {
- env.Set(key, "1")
- } else {
- env.Set(key, "0")
- }
-}
-
-func (env *Env) GetTime(key string) (time.Time, error) {
- t, err := time.Parse(time.RFC3339Nano, env.Get(key))
- return t, err
-}
-
-func (env *Env) SetTime(key string, t time.Time) {
- env.Set(key, t.Format(time.RFC3339Nano))
-}
-
-func (env *Env) GetInt(key string) int {
- return int(env.GetInt64(key))
-}
-
-func (env *Env) GetInt64(key string) int64 {
- s := strings.Trim(env.Get(key), " \t")
- val, err := strconv.ParseInt(s, 10, 64)
- if err != nil {
- return 0
- }
- return val
-}
-
-func (env *Env) SetInt(key string, value int) {
- env.Set(key, fmt.Sprintf("%d", value))
-}
-
-func (env *Env) SetInt64(key string, value int64) {
- env.Set(key, fmt.Sprintf("%d", value))
-}
-
-// Returns nil if key not found
-func (env *Env) GetList(key string) []string {
- sval := env.Get(key)
- if sval == "" {
- return nil
- }
- l := make([]string, 0, 1)
- if err := json.Unmarshal([]byte(sval), &l); err != nil {
- l = append(l, sval)
- }
- return l
-}
-
-func (env *Env) GetSubEnv(key string) *Env {
- sval := env.Get(key)
- if sval == "" {
- return nil
- }
- buf := bytes.NewBufferString(sval)
- var sub Env
- if err := sub.Decode(buf); err != nil {
- return nil
- }
- return &sub
-}
-
-func (env *Env) SetSubEnv(key string, sub *Env) error {
- var buf bytes.Buffer
- if err := sub.Encode(&buf); err != nil {
- return err
- }
- env.Set(key, string(buf.Bytes()))
- return nil
-}
-
-func (env *Env) GetJson(key string, iface interface{}) error {
- sval := env.Get(key)
- if sval == "" {
- return nil
- }
- return json.Unmarshal([]byte(sval), iface)
-}
-
-func (env *Env) SetJson(key string, value interface{}) error {
- sval, err := json.Marshal(value)
- if err != nil {
- return err
- }
- env.Set(key, string(sval))
- return nil
-}
-
-func (env *Env) SetList(key string, value []string) error {
- return env.SetJson(key, value)
-}
-
-func (env *Env) Set(key, value string) {
- *env = append(*env, key+"="+value)
-}
-
-func NewDecoder(src io.Reader) *Decoder {
- return &Decoder{
- json.NewDecoder(src),
- }
-}
-
-type Decoder struct {
- *json.Decoder
-}
-
-func (decoder *Decoder) Decode() (*Env, error) {
- m := make(map[string]interface{})
- if err := decoder.Decoder.Decode(&m); err != nil {
- return nil, err
- }
- env := &Env{}
- for key, value := range m {
- env.SetAuto(key, value)
- }
- return env, nil
-}
-
-// DecodeEnv decodes `src` as a json dictionary, and adds
-// each decoded key-value pair to the environment.
-//
-// If `src` cannot be decoded as a json dictionary, an error
-// is returned.
-func (env *Env) Decode(src io.Reader) error {
- m := make(map[string]interface{})
- d := json.NewDecoder(src)
- // We need this or we'll lose data when we decode int64 in json
- d.UseNumber()
- if err := d.Decode(&m); err != nil {
- return err
- }
- for k, v := range m {
- env.SetAuto(k, v)
- }
- return nil
-}
-
-func (env *Env) SetAuto(k string, v interface{}) {
- // Issue 7941 - if the value in the incoming JSON is null then treat it
- // as if they never specified the property at all.
- if v == nil {
- return
- }
-
- // FIXME: we fix-convert float values to int, because
- // encoding/json decodes integers to float64, but cannot encode them back.
- // (See https://golang.org/src/pkg/encoding/json/decode.go#L46)
- if fval, ok := v.(float64); ok {
- env.SetInt64(k, int64(fval))
- } else if sval, ok := v.(string); ok {
- env.Set(k, sval)
- } else if val, err := json.Marshal(v); err == nil {
- env.Set(k, string(val))
- } else {
- env.Set(k, fmt.Sprintf("%v", v))
- }
-}
-
-func changeFloats(v interface{}) interface{} {
- switch v := v.(type) {
- case float64:
- return int(v)
- case map[string]interface{}:
- for key, val := range v {
- v[key] = changeFloats(val)
- }
- case []interface{}:
- for idx, val := range v {
- v[idx] = changeFloats(val)
- }
- }
- return v
-}
-
-func (env *Env) Encode(dst io.Writer) error {
- m := make(map[string]interface{})
- for k, v := range env.Map() {
- var val interface{}
- if err := json.Unmarshal([]byte(v), &val); err == nil {
- // FIXME: we fix-convert float values to int, because
- // encoding/json decodes integers to float64, but cannot encode them back.
- // (See https://golang.org/src/pkg/encoding/json/decode.go#L46)
- m[k] = changeFloats(val)
- } else {
- m[k] = v
- }
- }
- if err := json.NewEncoder(dst).Encode(&m); err != nil {
- return err
- }
- return nil
-}
-
-func (env *Env) WriteTo(dst io.Writer) (int64, error) {
- wc := ioutils.NewWriteCounter(dst)
- err := env.Encode(wc)
- return wc.Count, err
-}
-
-func (env *Env) Import(src interface{}) (err error) {
- defer func() {
- if err != nil {
- err = fmt.Errorf("ImportEnv: %s", err)
- }
- }()
- var buf bytes.Buffer
- if err := json.NewEncoder(&buf).Encode(src); err != nil {
- return err
- }
- if err := env.Decode(&buf); err != nil {
- return err
- }
- return nil
-}
-
-func (env *Env) Map() map[string]string {
- m := make(map[string]string)
- for _, kv := range *env {
- parts := strings.SplitN(kv, "=", 2)
- m[parts[0]] = parts[1]
- }
- return m
-}
-
-// MultiMap returns a representation of env as a
-// map of string arrays, keyed by string.
-// This is the same structure as http headers for example,
-// which allow each key to have multiple values.
-func (env *Env) MultiMap() map[string][]string {
- m := make(map[string][]string)
- for _, kv := range *env {
- parts := strings.SplitN(kv, "=", 2)
- m[parts[0]] = append(m[parts[0]], parts[1])
- }
- return m
-}
-
-// InitMultiMap removes all values in env, then initializes
-// new values from the contents of m.
-func (env *Env) InitMultiMap(m map[string][]string) {
- (*env) = make([]string, 0, len(m))
- for k, vals := range m {
- for _, v := range vals {
- env.Set(k, v)
- }
- }
-}
diff --git a/engine/env_test.go b/engine/env_test.go
deleted file mode 100644
index 1398275b2b..0000000000
--- a/engine/env_test.go
+++ /dev/null
@@ -1,366 +0,0 @@
-package engine
-
-import (
- "bytes"
- "encoding/json"
- "testing"
- "time"
-
- "github.com/docker/docker/pkg/stringutils"
-)
-
-func TestEnvLenZero(t *testing.T) {
- env := &Env{}
- if env.Len() != 0 {
- t.Fatalf("%d", env.Len())
- }
-}
-
-func TestEnvLenNotZero(t *testing.T) {
- env := &Env{}
- env.Set("foo", "bar")
- env.Set("ga", "bu")
- if env.Len() != 2 {
- t.Fatalf("%d", env.Len())
- }
-}
-
-func TestEnvLenDup(t *testing.T) {
- env := &Env{
- "foo=bar",
- "foo=baz",
- "a=b",
- }
- // len(env) != env.Len()
- if env.Len() != 2 {
- t.Fatalf("%d", env.Len())
- }
-}
-
-func TestEnvGetDup(t *testing.T) {
- env := &Env{
- "foo=bar",
- "foo=baz",
- "foo=bif",
- }
- expected := "bif"
- if v := env.Get("foo"); v != expected {
- t.Fatalf("expect %q, got %q", expected, v)
- }
-}
-
-func TestNewJob(t *testing.T) {
- job := mkJob(t, "dummy", "--level=awesome")
- if job.Name != "dummy" {
- t.Fatalf("Wrong job name: %s", job.Name)
- }
- if len(job.Args) != 1 {
- t.Fatalf("Wrong number of job arguments: %d", len(job.Args))
- }
- if job.Args[0] != "--level=awesome" {
- t.Fatalf("Wrong job arguments: %s", job.Args[0])
- }
-}
-
-func TestSetenv(t *testing.T) {
- job := mkJob(t, "dummy")
- job.Setenv("foo", "bar")
- if val := job.Getenv("foo"); val != "bar" {
- t.Fatalf("Getenv returns incorrect value: %s", val)
- }
-
- job.Setenv("bar", "")
- if val := job.Getenv("bar"); val != "" {
- t.Fatalf("Getenv returns incorrect value: %s", val)
- }
- if val := job.Getenv("nonexistent"); val != "" {
- t.Fatalf("Getenv returns incorrect value: %s", val)
- }
-}
-
-func TestDecodeEnv(t *testing.T) {
- job := mkJob(t, "dummy")
- type tmp struct {
- Id1 int64
- Id2 int64
- }
- body := []byte("{\"tags\":{\"Id1\":123, \"Id2\":1234567}}")
- if err := job.DecodeEnv(bytes.NewBuffer(body)); err != nil {
- t.Fatalf("DecodeEnv failed: %v", err)
- }
- mytag := tmp{}
- if val := job.GetenvJson("tags", &mytag); val != nil {
- t.Fatalf("GetenvJson returns incorrect value: %s", val)
- }
-
- if mytag.Id1 != 123 || mytag.Id2 != 1234567 {
- t.Fatal("Get wrong values set by job.DecodeEnv")
- }
-}
-
-func TestSetenvBool(t *testing.T) {
- job := mkJob(t, "dummy")
- job.SetenvBool("foo", true)
- if val := job.GetenvBool("foo"); !val {
- t.Fatalf("GetenvBool returns incorrect value: %t", val)
- }
-
- job.SetenvBool("bar", false)
- if val := job.GetenvBool("bar"); val {
- t.Fatalf("GetenvBool returns incorrect value: %t", val)
- }
-
- if val := job.GetenvBool("nonexistent"); val {
- t.Fatalf("GetenvBool returns incorrect value: %t", val)
- }
-}
-
-func TestSetenvTime(t *testing.T) {
- job := mkJob(t, "dummy")
-
- now := time.Now()
- job.SetenvTime("foo", now)
- if val, err := job.GetenvTime("foo"); err != nil {
- t.Fatalf("GetenvTime failed to parse: %v", err)
- } else {
- nowStr := now.Format(time.RFC3339)
- valStr := val.Format(time.RFC3339)
- if nowStr != valStr {
- t.Fatalf("GetenvTime returns incorrect value: %s, Expected: %s", valStr, nowStr)
- }
- }
-
- job.Setenv("bar", "Obviously I'm not a date")
- if val, err := job.GetenvTime("bar"); err == nil {
- t.Fatalf("GetenvTime was supposed to fail, instead returned: %s", val)
- }
-}
-
-func TestSetenvInt(t *testing.T) {
- job := mkJob(t, "dummy")
-
- job.SetenvInt("foo", -42)
- if val := job.GetenvInt("foo"); val != -42 {
- t.Fatalf("GetenvInt returns incorrect value: %d", val)
- }
-
- job.SetenvInt("bar", 42)
- if val := job.GetenvInt("bar"); val != 42 {
- t.Fatalf("GetenvInt returns incorrect value: %d", val)
- }
- if val := job.GetenvInt("nonexistent"); val != 0 {
- t.Fatalf("GetenvInt returns incorrect value: %d", val)
- }
-}
-
-func TestSetenvList(t *testing.T) {
- job := mkJob(t, "dummy")
-
- job.SetenvList("foo", []string{"bar"})
- if val := job.GetenvList("foo"); len(val) != 1 || val[0] != "bar" {
- t.Fatalf("GetenvList returns incorrect value: %v", val)
- }
-
- job.SetenvList("bar", nil)
- if val := job.GetenvList("bar"); val != nil {
- t.Fatalf("GetenvList returns incorrect value: %v", val)
- }
- if val := job.GetenvList("nonexistent"); val != nil {
- t.Fatalf("GetenvList returns incorrect value: %v", val)
- }
-}
-
-func TestEnviron(t *testing.T) {
- job := mkJob(t, "dummy")
- job.Setenv("foo", "bar")
- val, exists := job.Environ()["foo"]
- if !exists {
- t.Fatalf("foo not found in the environ")
- }
- if val != "bar" {
- t.Fatalf("bar not found in the environ")
- }
-}
-
-func TestMultiMap(t *testing.T) {
- e := &Env{}
- e.Set("foo", "bar")
- e.Set("bar", "baz")
- e.Set("hello", "world")
- m := e.MultiMap()
- e2 := &Env{}
- e2.Set("old_key", "something something something")
- e2.InitMultiMap(m)
- if v := e2.Get("old_key"); v != "" {
- t.Fatalf("%#v", v)
- }
- if v := e2.Get("bar"); v != "baz" {
- t.Fatalf("%#v", v)
- }
- if v := e2.Get("hello"); v != "world" {
- t.Fatalf("%#v", v)
- }
-}
-
-func testMap(l int) [][2]string {
- res := make([][2]string, l)
- for i := 0; i < l; i++ {
- t := [2]string{stringutils.GenerateRandomAsciiString(5), stringutils.GenerateRandomAsciiString(20)}
- res[i] = t
- }
- return res
-}
-
-func BenchmarkSet(b *testing.B) {
- fix := testMap(100)
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- env := &Env{}
- for _, kv := range fix {
- env.Set(kv[0], kv[1])
- }
- }
-}
-
-func BenchmarkSetJson(b *testing.B) {
- fix := testMap(100)
- type X struct {
- f string
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- env := &Env{}
- for _, kv := range fix {
- if err := env.SetJson(kv[0], X{kv[1]}); err != nil {
- b.Fatal(err)
- }
- }
- }
-}
-
-func BenchmarkGet(b *testing.B) {
- fix := testMap(100)
- env := &Env{}
- for _, kv := range fix {
- env.Set(kv[0], kv[1])
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- for _, kv := range fix {
- env.Get(kv[0])
- }
- }
-}
-
-func BenchmarkGetJson(b *testing.B) {
- fix := testMap(100)
- env := &Env{}
- type X struct {
- f string
- }
- for _, kv := range fix {
- env.SetJson(kv[0], X{kv[1]})
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- for _, kv := range fix {
- if err := env.GetJson(kv[0], &X{}); err != nil {
- b.Fatal(err)
- }
- }
- }
-}
-
-func BenchmarkEncode(b *testing.B) {
- fix := testMap(100)
- env := &Env{}
- type X struct {
- f string
- }
- // half a json
- for i, kv := range fix {
- if i%2 != 0 {
- if err := env.SetJson(kv[0], X{kv[1]}); err != nil {
- b.Fatal(err)
- }
- continue
- }
- env.Set(kv[0], kv[1])
- }
- var writer bytes.Buffer
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- env.Encode(&writer)
- writer.Reset()
- }
-}
-
-func BenchmarkDecode(b *testing.B) {
- fix := testMap(100)
- env := &Env{}
- type X struct {
- f string
- }
- // half a json
- for i, kv := range fix {
- if i%2 != 0 {
- if err := env.SetJson(kv[0], X{kv[1]}); err != nil {
- b.Fatal(err)
- }
- continue
- }
- env.Set(kv[0], kv[1])
- }
- var writer bytes.Buffer
- env.Encode(&writer)
- denv := &Env{}
- reader := bytes.NewReader(writer.Bytes())
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- err := denv.Decode(reader)
- if err != nil {
- b.Fatal(err)
- }
- reader.Seek(0, 0)
- }
-}
-
-func TestLongNumbers(t *testing.T) {
- type T struct {
- TestNum int64
- }
- v := T{67108864}
- var buf bytes.Buffer
- e := &Env{}
- e.SetJson("Test", v)
- if err := e.Encode(&buf); err != nil {
- t.Fatal(err)
- }
- res := make(map[string]T)
- if err := json.Unmarshal(buf.Bytes(), &res); err != nil {
- t.Fatal(err)
- }
- if res["Test"].TestNum != v.TestNum {
- t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum)
- }
-}
-
-func TestLongNumbersArray(t *testing.T) {
- type T struct {
- TestNum []int64
- }
- v := T{[]int64{67108864}}
- var buf bytes.Buffer
- e := &Env{}
- e.SetJson("Test", v)
- if err := e.Encode(&buf); err != nil {
- t.Fatal(err)
- }
- res := make(map[string]T)
- if err := json.Unmarshal(buf.Bytes(), &res); err != nil {
- t.Fatal(err)
- }
- if res["Test"].TestNum[0] != v.TestNum[0] {
- t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum)
- }
-}
diff --git a/engine/hack.go b/engine/hack.go
deleted file mode 100644
index 10595ce2b1..0000000000
--- a/engine/hack.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package engine
-
-type Hack map[string]interface{}
-
-func (eng *Engine) HackGetGlobalVar(key string) interface{} {
- if eng.hack == nil {
- return nil
- }
- val, exists := eng.hack[key]
- if !exists {
- return nil
- }
- return val
-}
-
-func (eng *Engine) HackSetGlobalVar(key string, val interface{}) {
- if eng.hack == nil {
- eng.hack = make(Hack)
- }
- eng.hack[key] = val
-}
diff --git a/engine/helpers_test.go b/engine/helpers_test.go
deleted file mode 100644
index cfa11da7cd..0000000000
--- a/engine/helpers_test.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package engine
-
-import (
- "testing"
-)
-
-var globalTestID string
-
-func mkJob(t *testing.T, name string, args ...string) *Job {
- return New().Job(name, args...)
-}
diff --git a/engine/http.go b/engine/http.go
deleted file mode 100644
index 7e4dcd7bb4..0000000000
--- a/engine/http.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package engine
-
-import (
- "net/http"
- "path"
-)
-
-// ServeHTTP executes a job as specified by the http request `r`, and sends the
-// result as an http response.
-// This method allows an Engine instance to be passed as a standard http.Handler interface.
-//
-// Note that the protocol used in this method is a convenience wrapper and is not the canonical
-// implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing,
-// and so cannot differentiate stdout from stderr. Additionally, headers cannot be added to a response
-// once data has been written to the body, which makes it inconvenient to return metadata such
-// as the exit status.
-//
-func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- var (
- jobName = path.Base(r.URL.Path)
- jobArgs, exists = r.URL.Query()["a"]
- )
- if !exists {
- jobArgs = []string{}
- }
- w.Header().Set("Job-Name", jobName)
- for _, arg := range jobArgs {
- w.Header().Add("Job-Args", arg)
- }
- job := eng.Job(jobName, jobArgs...)
- job.Stdout.Add(w)
- job.Stderr.Add(w)
- // FIXME: distinguish job status from engine error in Run()
- // The former should be passed as a special header, the former
- // should cause a 500 status
- w.WriteHeader(http.StatusOK)
- // The exit status cannot be sent reliably with HTTP1, because headers
- // can only be sent before the body.
- // (we could possibly use http footers via chunked encoding, but I couldn't find
- // how to use them in net/http)
- job.Run()
-}
diff --git a/engine/job.go b/engine/job.go
deleted file mode 100644
index 12acdc9334..0000000000
--- a/engine/job.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package engine
-
-import (
- "bytes"
- "fmt"
- "io"
- "strings"
- "sync"
- "time"
-
- "github.com/Sirupsen/logrus"
-)
-
-// A job is the fundamental unit of work in the docker engine.
-// Everything docker can do should eventually be exposed as a job.
-// For example: execute a process in a container, create a new container,
-// download an archive from the internet, serve the http api, etc.
-//
-// The job API is designed after unix processes: a job has a name, arguments,
-// environment variables, standard streams for input, output and error.
-type Job struct {
- Eng *Engine
- Name string
- Args []string
- env *Env
- Stdout *Output
- Stderr *Output
- Stdin *Input
- handler Handler
- end time.Time
- closeIO bool
-
- // When closed, the job has been cancelled.
- // Note: not all jobs implement cancellation.
- // See Job.Cancel() and Job.WaitCancelled()
- cancelled chan struct{}
- cancelOnce sync.Once
-}
-
-// Run executes the job and blocks until the job completes.
-// If the job fails it returns an error
-func (job *Job) Run() (err error) {
- defer func() {
- // Wait for all background tasks to complete
- if job.closeIO {
- if err := job.Stdout.Close(); err != nil {
- logrus.Error(err)
- }
- if err := job.Stderr.Close(); err != nil {
- logrus.Error(err)
- }
- if err := job.Stdin.Close(); err != nil {
- logrus.Error(err)
- }
- }
- }()
-
- if job.Eng.IsShutdown() && !job.GetenvBool("overrideShutdown") {
- return fmt.Errorf("engine is shutdown")
- }
- // FIXME: this is a temporary workaround to avoid Engine.Shutdown
- // waiting 5 seconds for server/api.ServeApi to complete (which it never will)
- // everytime the daemon is cleanly restarted.
- // The permanent fix is to implement Job.Stop and Job.OnStop so that
- // ServeApi can cooperate and terminate cleanly.
- if job.Name != "serveapi" {
- job.Eng.l.Lock()
- job.Eng.tasks.Add(1)
- job.Eng.l.Unlock()
- defer job.Eng.tasks.Done()
- }
- // FIXME: make this thread-safe
- // FIXME: implement wait
- if !job.end.IsZero() {
- return fmt.Errorf("%s: job has already completed", job.Name)
- }
- // Log beginning and end of the job
- if job.Eng.Logging {
- logrus.Infof("+job %s", job.CallString())
- defer func() {
- okerr := "OK"
- if err != nil {
- okerr = fmt.Sprintf("ERR: %s", err)
- }
- logrus.Infof("-job %s %s", job.CallString(), okerr)
- }()
- }
-
- if job.handler == nil {
- return fmt.Errorf("%s: command not found", job.Name)
- }
-
- var errorMessage = bytes.NewBuffer(nil)
- job.Stderr.Add(errorMessage)
-
- err = job.handler(job)
- job.end = time.Now()
-
- return
-}
-
-func (job *Job) CallString() string {
- return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
-}
-
-func (job *Job) Env() *Env {
- return job.env
-}
-
-func (job *Job) EnvExists(key string) (value bool) {
- return job.env.Exists(key)
-}
-
-func (job *Job) Getenv(key string) (value string) {
- return job.env.Get(key)
-}
-
-func (job *Job) GetenvBool(key string) (value bool) {
- return job.env.GetBool(key)
-}
-
-func (job *Job) SetenvBool(key string, value bool) {
- job.env.SetBool(key, value)
-}
-
-func (job *Job) GetenvTime(key string) (value time.Time, err error) {
- return job.env.GetTime(key)
-}
-
-func (job *Job) SetenvTime(key string, value time.Time) {
- job.env.SetTime(key, value)
-}
-
-func (job *Job) GetenvSubEnv(key string) *Env {
- return job.env.GetSubEnv(key)
-}
-
-func (job *Job) SetenvSubEnv(key string, value *Env) error {
- return job.env.SetSubEnv(key, value)
-}
-
-func (job *Job) GetenvInt64(key string) int64 {
- return job.env.GetInt64(key)
-}
-
-func (job *Job) GetenvInt(key string) int {
- return job.env.GetInt(key)
-}
-
-func (job *Job) SetenvInt64(key string, value int64) {
- job.env.SetInt64(key, value)
-}
-
-func (job *Job) SetenvInt(key string, value int) {
- job.env.SetInt(key, value)
-}
-
-// Returns nil if key not found
-func (job *Job) GetenvList(key string) []string {
- return job.env.GetList(key)
-}
-
-func (job *Job) GetenvJson(key string, iface interface{}) error {
- return job.env.GetJson(key, iface)
-}
-
-func (job *Job) SetenvJson(key string, value interface{}) error {
- return job.env.SetJson(key, value)
-}
-
-func (job *Job) SetenvList(key string, value []string) error {
- return job.env.SetJson(key, value)
-}
-
-func (job *Job) Setenv(key, value string) {
- job.env.Set(key, value)
-}
-
-// DecodeEnv decodes `src` as a json dictionary, and adds
-// each decoded key-value pair to the environment.
-//
-// If `src` cannot be decoded as a json dictionary, an error
-// is returned.
-func (job *Job) DecodeEnv(src io.Reader) error {
- return job.env.Decode(src)
-}
-
-func (job *Job) EncodeEnv(dst io.Writer) error {
- return job.env.Encode(dst)
-}
-
-func (job *Job) ImportEnv(src interface{}) (err error) {
- return job.env.Import(src)
-}
-
-func (job *Job) Environ() map[string]string {
- return job.env.Map()
-}
-
-func (job *Job) Printf(format string, args ...interface{}) (n int, err error) {
- return fmt.Fprintf(job.Stdout, format, args...)
-}
-
-func (job *Job) Errorf(format string, args ...interface{}) (n int, err error) {
- return fmt.Fprintf(job.Stderr, format, args...)
-}
-
-func (job *Job) SetCloseIO(val bool) {
- job.closeIO = val
-}
-
-// When called, causes the Job.WaitCancelled channel to unblock.
-func (job *Job) Cancel() {
- job.cancelOnce.Do(func() {
- close(job.cancelled)
- })
-}
-
-// Returns a channel which is closed ("never blocks") when the job is cancelled.
-func (job *Job) WaitCancelled() <-chan struct{} {
- return job.cancelled
-}
diff --git a/engine/job_test.go b/engine/job_test.go
deleted file mode 100644
index 76135e6e67..0000000000
--- a/engine/job_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package engine
-
-import (
- "bytes"
- "errors"
- "fmt"
- "testing"
-)
-
-func TestJobOK(t *testing.T) {
- eng := New()
- eng.Register("return_ok", func(job *Job) error { return nil })
- err := eng.Job("return_ok").Run()
- if err != nil {
- t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err)
- }
-}
-
-func TestJobErr(t *testing.T) {
- eng := New()
- eng.Register("return_err", func(job *Job) error { return errors.New("return_err") })
- err := eng.Job("return_err").Run()
- if err == nil {
- t.Fatalf("When a job returns error, Run() should return an error")
- }
-}
-
-func TestJobStdoutString(t *testing.T) {
- eng := New()
- // FIXME: test multiple combinations of output and status
- eng.Register("say_something_in_stdout", func(job *Job) error {
- job.Printf("Hello world\n")
- return nil
- })
-
- job := eng.Job("say_something_in_stdout")
- var outputBuffer = bytes.NewBuffer(nil)
- job.Stdout.Add(outputBuffer)
- if err := job.Run(); err != nil {
- t.Fatal(err)
- }
- fmt.Println(outputBuffer)
- var output = Tail(outputBuffer, 1)
- if expectedOutput := "Hello world"; output != expectedOutput {
- t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
- }
-}
diff --git a/engine/shutdown_test.go b/engine/shutdown_test.go
deleted file mode 100644
index d2ef0339de..0000000000
--- a/engine/shutdown_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package engine
-
-import (
- "testing"
- "time"
-)
-
-func TestShutdownEmpty(t *testing.T) {
- eng := New()
- if eng.IsShutdown() {
- t.Fatalf("IsShutdown should be false")
- }
- eng.Shutdown()
- if !eng.IsShutdown() {
- t.Fatalf("IsShutdown should be true")
- }
-}
-
-func TestShutdownAfterRun(t *testing.T) {
- eng := New()
- eng.Register("foo", func(job *Job) error {
- return nil
- })
- if err := eng.Job("foo").Run(); err != nil {
- t.Fatal(err)
- }
- eng.Shutdown()
- if err := eng.Job("foo").Run(); err == nil {
- t.Fatalf("%#v", *eng)
- }
-}
-
-// An approximate and racy, but better-than-nothing test that
-//
-func TestShutdownDuringRun(t *testing.T) {
- var (
- jobDelay time.Duration = 500 * time.Millisecond
- jobDelayLow time.Duration = 100 * time.Millisecond
- jobDelayHigh time.Duration = 700 * time.Millisecond
- )
- eng := New()
- var completed bool
- eng.Register("foo", func(job *Job) error {
- time.Sleep(jobDelay)
- completed = true
- return nil
- })
- go eng.Job("foo").Run()
- time.Sleep(50 * time.Millisecond)
- done := make(chan struct{})
- var startShutdown time.Time
- go func() {
- startShutdown = time.Now()
- eng.Shutdown()
- close(done)
- }()
- time.Sleep(50 * time.Millisecond)
- if err := eng.Job("foo").Run(); err == nil {
- t.Fatalf("run on shutdown should fail: %#v", *eng)
- }
- <-done
- // Verify that Shutdown() blocks for roughly 500ms, instead
- // of returning almost instantly.
- //
- // We use >100ms to leave ample margin for race conditions between
- // goroutines. It's possible (but unlikely in reasonable testing
- // conditions), that this test will cause a false positive or false
- // negative. But it's probably better than not having any test
- // for the 99.999% of time where testing conditions are reasonable.
- if d := time.Since(startShutdown); d.Nanoseconds() < jobDelayLow.Nanoseconds() {
- t.Fatalf("shutdown did not block long enough: %v", d)
- } else if d.Nanoseconds() > jobDelayHigh.Nanoseconds() {
- t.Fatalf("shutdown blocked too long: %v", d)
- }
- if !completed {
- t.Fatalf("job did not complete")
- }
-}
diff --git a/engine/streams.go b/engine/streams.go
deleted file mode 100644
index 2863e94487..0000000000
--- a/engine/streams.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package engine
-
-import (
- "bytes"
- "fmt"
- "io"
- "strings"
- "sync"
- "unicode"
-)
-
-type Output struct {
- sync.Mutex
- dests []io.Writer
- tasks sync.WaitGroup
- used bool
-}
-
-// Tail returns the n last lines of a buffer
-// stripped out of trailing white spaces, if any.
-//
-// if n <= 0, returns an empty string
-func Tail(buffer *bytes.Buffer, n int) string {
- if n <= 0 {
- return ""
- }
- s := strings.TrimRightFunc(buffer.String(), unicode.IsSpace)
- i := len(s) - 1
- for ; i >= 0 && n > 0; i-- {
- if s[i] == '\n' {
- n--
- if n == 0 {
- break
- }
- }
- }
- // when i == -1, return the whole string which is s[0:]
- return s[i+1:]
-}
-
-// NewOutput returns a new Output object with no destinations attached.
-// Writing to an empty Output will cause the written data to be discarded.
-func NewOutput() *Output {
- return &Output{}
-}
-
-// Return true if something was written on this output
-func (o *Output) Used() bool {
- o.Lock()
- defer o.Unlock()
- return o.used
-}
-
-// Add attaches a new destination to the Output. Any data subsequently written
-// to the output will be written to the new destination in addition to all the others.
-// This method is thread-safe.
-func (o *Output) Add(dst io.Writer) {
- o.Lock()
- defer o.Unlock()
- o.dests = append(o.dests, dst)
-}
-
-// Set closes and remove existing destination and then attaches a new destination to
-// the Output. Any data subsequently written to the output will be written to the new
-// destination in addition to all the others. This method is thread-safe.
-func (o *Output) Set(dst io.Writer) {
- o.Close()
- o.Lock()
- defer o.Unlock()
- o.dests = []io.Writer{dst}
-}
-
-// AddPipe creates an in-memory pipe with io.Pipe(), adds its writing end as a destination,
-// and returns its reading end for consumption by the caller.
-// This is a rough equivalent similar to Cmd.StdoutPipe() in the standard os/exec package.
-// This method is thread-safe.
-func (o *Output) AddPipe() (io.Reader, error) {
- r, w := io.Pipe()
- o.Add(w)
- return r, nil
-}
-
-// Write writes the same data to all registered destinations.
-// This method is thread-safe.
-func (o *Output) Write(p []byte) (n int, err error) {
- o.Lock()
- defer o.Unlock()
- o.used = true
- var firstErr error
- for _, dst := range o.dests {
- _, err := dst.Write(p)
- if err != nil && firstErr == nil {
- firstErr = err
- }
- }
- return len(p), firstErr
-}
-
-// Close unregisters all destinations and waits for all background
-// AddTail and AddString tasks to complete.
-// The Close method of each destination is called if it exists.
-func (o *Output) Close() error {
- o.Lock()
- defer o.Unlock()
- var firstErr error
- for _, dst := range o.dests {
- if closer, ok := dst.(io.Closer); ok {
- err := closer.Close()
- if err != nil && firstErr == nil {
- firstErr = err
- }
- }
- }
- o.tasks.Wait()
- o.dests = nil
- return firstErr
-}
-
-type Input struct {
- src io.Reader
- sync.Mutex
-}
-
-// NewInput returns a new Input object with no source attached.
-// Reading to an empty Input will return io.EOF.
-func NewInput() *Input {
- return &Input{}
-}
-
-// Read reads from the input in a thread-safe way.
-func (i *Input) Read(p []byte) (n int, err error) {
- i.Mutex.Lock()
- defer i.Mutex.Unlock()
- if i.src == nil {
- return 0, io.EOF
- }
- return i.src.Read(p)
-}
-
-// Closes the src
-// Not thread safe on purpose
-func (i *Input) Close() error {
- if i.src != nil {
- if closer, ok := i.src.(io.Closer); ok {
- return closer.Close()
- }
- }
- return nil
-}
-
-// Add attaches a new source to the input.
-// Add can only be called once per input. Subsequent calls will
-// return an error.
-func (i *Input) Add(src io.Reader) error {
- i.Mutex.Lock()
- defer i.Mutex.Unlock()
- if i.src != nil {
- return fmt.Errorf("Maximum number of sources reached: 1")
- }
- i.src = src
- return nil
-}
-
-// AddEnv starts a new goroutine which will decode all subsequent data
-// as a stream of json-encoded objects, and point `dst` to the last
-// decoded object.
-// The result `env` can be queried using the type-neutral Env interface.
-// It is not safe to query `env` until the Output is closed.
-func (o *Output) AddEnv() (dst *Env, err error) {
- src, err := o.AddPipe()
- if err != nil {
- return nil, err
- }
- dst = &Env{}
- o.tasks.Add(1)
- go func() {
- defer o.tasks.Done()
- decoder := NewDecoder(src)
- for {
- env, err := decoder.Decode()
- if err != nil {
- return
- }
- *dst = *env
- }
- }()
- return dst, nil
-}
diff --git a/engine/streams_test.go b/engine/streams_test.go
deleted file mode 100644
index c22338a32e..0000000000
--- a/engine/streams_test.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package engine
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "strings"
- "testing"
-)
-
-type sentinelWriteCloser struct {
- calledWrite bool
- calledClose bool
-}
-
-func (w *sentinelWriteCloser) Write(p []byte) (int, error) {
- w.calledWrite = true
- return len(p), nil
-}
-
-func (w *sentinelWriteCloser) Close() error {
- w.calledClose = true
- return nil
-}
-
-func TestOutputAddEnv(t *testing.T) {
- input := "{\"foo\": \"bar\", \"answer_to_life_the_universe_and_everything\": 42}"
- o := NewOutput()
- result, err := o.AddEnv()
- if err != nil {
- t.Fatal(err)
- }
- o.Write([]byte(input))
- o.Close()
- if v := result.Get("foo"); v != "bar" {
- t.Errorf("Expected %v, got %v", "bar", v)
- }
- if v := result.GetInt("answer_to_life_the_universe_and_everything"); v != 42 {
- t.Errorf("Expected %v, got %v", 42, v)
- }
- if v := result.Get("this-value-doesnt-exist"); v != "" {
- t.Errorf("Expected %v, got %v", "", v)
- }
-}
-
-func TestOutputAddClose(t *testing.T) {
- o := NewOutput()
- var s sentinelWriteCloser
- o.Add(&s)
- if err := o.Close(); err != nil {
- t.Fatal(err)
- }
- // Write data after the output is closed.
- // Write should succeed, but no destination should receive it.
- if _, err := o.Write([]byte("foo bar")); err != nil {
- t.Fatal(err)
- }
- if !s.calledClose {
- t.Fatal("Output.Close() didn't close the destination")
- }
-}
-
-func TestOutputAddPipe(t *testing.T) {
- var testInputs = []string{
- "hello, world!",
- "One\nTwo\nThree",
- "",
- "A line\nThen another nl-terminated line\n",
- "A line followed by an empty line\n\n",
- }
- for _, input := range testInputs {
- expectedOutput := input
- o := NewOutput()
- r, err := o.AddPipe()
- if err != nil {
- t.Fatal(err)
- }
- go func(o *Output) {
- if n, err := o.Write([]byte(input)); err != nil {
- t.Error(err)
- } else if n != len(input) {
- t.Errorf("Expected %d, got %d", len(input), n)
- }
- if err := o.Close(); err != nil {
- t.Error(err)
- }
- }(o)
- output, err := ioutil.ReadAll(r)
- if err != nil {
- t.Fatal(err)
- }
- if string(output) != expectedOutput {
- t.Errorf("Last line is not stored as return string.\nExpected: '%s'\nGot: '%s'", expectedOutput, output)
- }
- }
-}
-
-func TestTail(t *testing.T) {
- var tests = make(map[string][]string)
- tests["hello, world!"] = []string{
- "",
- "hello, world!",
- "hello, world!",
- "hello, world!",
- }
- tests["One\nTwo\nThree"] = []string{
- "",
- "Three",
- "Two\nThree",
- "One\nTwo\nThree",
- }
- tests["One\nTwo\n\n\n"] = []string{
- "",
- "Two",
- "One\nTwo",
- }
- for input, outputs := range tests {
- for n, expectedOutput := range outputs {
- output := Tail(bytes.NewBufferString(input), n)
- if output != expectedOutput {
- t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot : '%s'", n, expectedOutput, output)
- }
- }
- }
-}
-
-func lastLine(txt string) string {
- scanner := bufio.NewScanner(strings.NewReader(txt))
- var lastLine string
- for scanner.Scan() {
- lastLine = scanner.Text()
- }
- return lastLine
-}
-
-func TestOutputAdd(t *testing.T) {
- o := NewOutput()
- b := &bytes.Buffer{}
- o.Add(b)
- input := "hello, world!"
- if n, err := o.Write([]byte(input)); err != nil {
- t.Fatal(err)
- } else if n != len(input) {
- t.Fatalf("Expected %d, got %d", len(input), n)
- }
- if output := b.String(); output != input {
- t.Fatalf("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output)
- }
-}
-
-func TestOutputWriteError(t *testing.T) {
- o := NewOutput()
- buf := &bytes.Buffer{}
- o.Add(buf)
- r, w := io.Pipe()
- input := "Hello there"
- expectedErr := fmt.Errorf("This is an error")
- r.CloseWithError(expectedErr)
- o.Add(w)
- n, err := o.Write([]byte(input))
- if err != expectedErr {
- t.Fatalf("Output.Write() should return the first error encountered, if any")
- }
- if buf.String() != input {
- t.Fatalf("Output.Write() should attempt write on all destinations, even after encountering an error")
- }
- if n != len(input) {
- t.Fatalf("Output.Write() should return the size of the input if it successfully writes to at least one destination")
- }
-}
-
-func TestInputAddEmpty(t *testing.T) {
- i := NewInput()
- var b bytes.Buffer
- if err := i.Add(&b); err != nil {
- t.Fatal(err)
- }
- data, err := ioutil.ReadAll(i)
- if err != nil {
- t.Fatal(err)
- }
- if len(data) > 0 {
- t.Fatalf("Read from empty input should yield no data")
- }
-}
-
-func TestInputAddTwo(t *testing.T) {
- i := NewInput()
- var b1 bytes.Buffer
- // First add should succeed
- if err := i.Add(&b1); err != nil {
- t.Fatal(err)
- }
- var b2 bytes.Buffer
- // Second add should fail
- if err := i.Add(&b2); err == nil {
- t.Fatalf("Adding a second source should return an error")
- }
-}
-
-func TestInputAddNotEmpty(t *testing.T) {
- i := NewInput()
- b := bytes.NewBufferString("hello world\nabc")
- expectedResult := b.String()
- i.Add(b)
- result, err := ioutil.ReadAll(i)
- if err != nil {
- t.Fatal(err)
- }
- if string(result) != expectedResult {
- t.Fatalf("Expected: %v\nReceived: %v", expectedResult, result)
- }
-}
diff --git a/graph/export.go b/graph/export.go
index ae061a8a0f..c356a23225 100644
--- a/graph/export.go
+++ b/graph/export.go
@@ -5,7 +5,7 @@ import (
"io"
"io/ioutil"
"os"
- "path"
+ "path/filepath"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/archive"
@@ -83,7 +83,7 @@ func (s *TagStore) ImageExport(imageExportConfig *ImageExportConfig) error {
}
// write repositories, if there is something to write
if len(rootRepoMap) > 0 {
- f, err := os.OpenFile(path.Join(tempdir, "repositories"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+ f, err := os.OpenFile(filepath.Join(tempdir, "repositories"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
f.Close()
return err
@@ -115,7 +115,7 @@ func (s *TagStore) ImageExport(imageExportConfig *ImageExportConfig) error {
func (s *TagStore) exportImage(name, tempdir string) error {
for n := name; n != ""; {
// temporary directory
- tmpImageDir := path.Join(tempdir, n)
+ tmpImageDir := filepath.Join(tempdir, n)
if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil {
if os.IsExist(err) {
return nil
@@ -126,12 +126,12 @@ func (s *TagStore) exportImage(name, tempdir string) error {
var version = "1.0"
var versionBuf = []byte(version)
- if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil {
+ if err := ioutil.WriteFile(filepath.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil {
return err
}
// serialize json
- json, err := os.Create(path.Join(tmpImageDir, "json"))
+ json, err := os.Create(filepath.Join(tmpImageDir, "json"))
if err != nil {
return err
}
@@ -148,7 +148,7 @@ func (s *TagStore) exportImage(name, tempdir string) error {
}
// serialize filesystem
- fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar"))
+ fsTar, err := os.Create(filepath.Join(tmpImageDir, "layer.tar"))
if err != nil {
return err
}
diff --git a/graph/graph.go b/graph/graph.go
index 9b2d7c2ee9..e95887fab8 100644
--- a/graph/graph.go
+++ b/graph/graph.go
@@ -7,7 +7,6 @@ import (
"io"
"io/ioutil"
"os"
- "path"
"path/filepath"
"runtime"
"strings"
@@ -23,6 +22,7 @@ import (
"github.com/docker/docker/pkg/progressreader"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/stringid"
+ "github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/runconfig"
)
@@ -42,7 +42,7 @@ func NewGraph(root string, driver graphdriver.Driver) (*Graph, error) {
return nil, err
}
// Create the root directory if it doesn't exists
- if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) {
+ if err := system.MkdirAll(root, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
@@ -229,8 +229,8 @@ func (graph *Graph) TempLayerArchive(id string, sf *streamformatter.StreamFormat
// Mktemp creates a temporary sub-directory inside the graph's filesystem.
func (graph *Graph) Mktemp(id string) (string, error) {
- dir := path.Join(graph.Root, "_tmp", stringid.GenerateRandomID())
- if err := os.MkdirAll(dir, 0700); err != nil {
+ dir := filepath.Join(graph.Root, "_tmp", stringid.GenerateRandomID())
+ if err := system.MkdirAll(dir, 0700); err != nil {
return "", err
}
return dir, nil
@@ -254,9 +254,6 @@ func bufferToFile(f *os.File, src io.Reader) (int64, digest.Digest, error) {
if err != nil {
return 0, "", err
}
- if err = f.Sync(); err != nil {
- return 0, "", err
- }
n, err := f.Seek(0, os.SEEK_CUR)
if err != nil {
return 0, "", err
@@ -290,28 +287,28 @@ func SetupInitLayer(initLayer string) error {
parts := strings.Split(pth, "/")
prev := "/"
for _, p := range parts[1:] {
- prev = path.Join(prev, p)
- syscall.Unlink(path.Join(initLayer, prev))
+ prev = filepath.Join(prev, p)
+ syscall.Unlink(filepath.Join(initLayer, prev))
}
- if _, err := os.Stat(path.Join(initLayer, pth)); err != nil {
+ if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil {
if os.IsNotExist(err) {
- if err := os.MkdirAll(path.Join(initLayer, path.Dir(pth)), 0755); err != nil {
+ if err := system.MkdirAll(filepath.Join(initLayer, filepath.Dir(pth)), 0755); err != nil {
return err
}
switch typ {
case "dir":
- if err := os.MkdirAll(path.Join(initLayer, pth), 0755); err != nil {
+ if err := system.MkdirAll(filepath.Join(initLayer, pth), 0755); err != nil {
return err
}
case "file":
- f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755)
+ f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755)
if err != nil {
return err
}
f.Close()
default:
- if err := os.Symlink(typ, path.Join(initLayer, pth)); err != nil {
+ if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil {
return err
}
}
@@ -432,7 +429,7 @@ func (graph *Graph) Heads() (map[string]*image.Image, error) {
}
func (graph *Graph) ImageRoot(id string) string {
- return path.Join(graph.Root, id)
+ return filepath.Join(graph.Root, id)
}
func (graph *Graph) Driver() graphdriver.Driver {
diff --git a/graph/import.go b/graph/import.go
index 50e605c948..2e08e6c840 100644
--- a/graph/import.go
+++ b/graph/import.go
@@ -16,14 +16,13 @@ import (
type ImageImportConfig struct {
Changes []string
InConfig io.ReadCloser
- Json bool
OutStream io.Writer
ContainerConfig *runconfig.Config
}
func (s *TagStore) Import(src string, repo string, tag string, imageImportConfig *ImageImportConfig) error {
var (
- sf = streamformatter.NewStreamFormatter(imageImportConfig.Json)
+ sf = streamformatter.NewJSONStreamFormatter()
archive archive.ArchiveReader
resp *http.Response
)
diff --git a/graph/load.go b/graph/load.go
index d978b1ee8e..9afde34c9a 100644
--- a/graph/load.go
+++ b/graph/load.go
@@ -1,4 +1,4 @@
-// +build linux
+// +build linux windows
package graph
@@ -7,7 +7,7 @@ import (
"io"
"io/ioutil"
"os"
- "path"
+ "path/filepath"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/image"
@@ -25,7 +25,7 @@ func (s *TagStore) Load(inTar io.ReadCloser, outStream io.Writer) error {
defer os.RemoveAll(tmpImageDir)
var (
- repoDir = path.Join(tmpImageDir, "repo")
+ repoDir = filepath.Join(tmpImageDir, "repo")
)
if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
@@ -58,7 +58,7 @@ func (s *TagStore) Load(inTar io.ReadCloser, outStream io.Writer) error {
}
}
- reposJSONFile, err := os.Open(path.Join(tmpImageDir, "repo", "repositories"))
+ reposJSONFile, err := os.Open(filepath.Join(tmpImageDir, "repo", "repositories"))
if err != nil {
if !os.IsNotExist(err) {
return err
@@ -87,13 +87,13 @@ func (s *TagStore) recursiveLoad(address, tmpImageDir string) error {
if _, err := s.LookupImage(address); err != nil {
logrus.Debugf("Loading %s", address)
- imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json"))
+ imageJson, err := ioutil.ReadFile(filepath.Join(tmpImageDir, "repo", address, "json"))
if err != nil {
logrus.Debugf("Error reading json", err)
return err
}
- layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar"))
+ layer, err := os.Open(filepath.Join(tmpImageDir, "repo", address, "layer.tar"))
if err != nil {
logrus.Debugf("Error reading embedded tar", err)
return err
diff --git a/graph/load_unsupported.go b/graph/load_unsupported.go
index 7c51559696..45bdd98be7 100644
--- a/graph/load_unsupported.go
+++ b/graph/load_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+// +build !linux,!windows
package graph
diff --git a/graph/pull.go b/graph/pull.go
index c3c064fc58..4ebe96279d 100644
--- a/graph/pull.go
+++ b/graph/pull.go
@@ -17,21 +17,20 @@ import (
"github.com/docker/docker/pkg/progressreader"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/stringid"
+ "github.com/docker/docker/pkg/transport"
"github.com/docker/docker/registry"
"github.com/docker/docker/utils"
)
type ImagePullConfig struct {
- Parallel bool
MetaHeaders map[string][]string
AuthConfig *cliconfig.AuthConfig
- Json bool
OutStream io.Writer
}
func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConfig) error {
var (
- sf = streamformatter.NewStreamFormatter(imagePullConfig.Json)
+ sf = streamformatter.NewJSONStreamFormatter()
)
// Resolve the Repository name from fqn to RepositoryInfo
@@ -57,12 +56,19 @@ func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConf
defer s.poolRemove("pull", utils.ImageReference(repoInfo.LocalName, tag))
logrus.Debugf("pulling image from host %q with remote name %q", repoInfo.Index.Name, repoInfo.RemoteName)
- endpoint, err := repoInfo.GetEndpoint()
+
+ endpoint, err := repoInfo.GetEndpoint(imagePullConfig.MetaHeaders)
if err != nil {
return err
}
-
- r, err := registry.NewSession(imagePullConfig.AuthConfig, registry.HTTPRequestFactory(imagePullConfig.MetaHeaders), endpoint, true)
+ // TODO(tiborvass): reuse client from endpoint?
+ // Adds Docker-specific headers as well as user-specified headers (metaHeaders)
+ tr := transport.NewTransport(
+ registry.NewTransport(registry.ReceiveTimeout, endpoint.IsSecure),
+ registry.DockerHeaders(imagePullConfig.MetaHeaders)...,
+ )
+ client := registry.HTTPClient(tr)
+ r, err := registry.NewSession(client, imagePullConfig.AuthConfig, endpoint)
if err != nil {
return err
}
@@ -78,7 +84,7 @@ func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConf
}
logrus.Debugf("pulling v2 repository with local name %q", repoInfo.LocalName)
- if err := s.pullV2Repository(r, imagePullConfig.OutStream, repoInfo, tag, sf, imagePullConfig.Parallel); err == nil {
+ if err := s.pullV2Repository(r, imagePullConfig.OutStream, repoInfo, tag, sf); err == nil {
s.eventsService.Log("pull", logName, "")
return nil
} else if err != registry.ErrDoesNotExist && err != ErrV2RegistryUnavailable {
@@ -88,8 +94,12 @@ func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConf
logrus.Debug("image does not exist on v2 registry, falling back to v1")
}
+ if utils.DigestReference(tag) {
+ return fmt.Errorf("pulling with digest reference failed from v2 registry")
+ }
+
logrus.Debugf("pulling v1 repository with local name %q", repoInfo.LocalName)
- if err = s.pullRepository(r, imagePullConfig.OutStream, repoInfo, tag, sf, imagePullConfig.Parallel); err != nil {
+ if err = s.pullRepository(r, imagePullConfig.OutStream, repoInfo, tag, sf); err != nil {
return err
}
@@ -98,7 +108,7 @@ func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConf
return nil
}
-func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, askedTag string, sf *streamformatter.StreamFormatter, parallel bool) error {
+func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, askedTag string, sf *streamformatter.StreamFormatter) error {
out.Write(sf.FormatStatus("", "Pulling repository %s", repoInfo.CanonicalName))
repoData, err := r.GetRepositoryData(repoInfo.RemoteName)
@@ -111,7 +121,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
}
logrus.Debugf("Retrieving the tag list")
- tagsList, err := r.GetRemoteTags(repoData.Endpoints, repoInfo.RemoteName, repoData.Tokens)
+ tagsList, err := r.GetRemoteTags(repoData.Endpoints, repoInfo.RemoteName)
if err != nil {
logrus.Errorf("unable to get remote tags: %s", err)
return err
@@ -146,17 +156,13 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
for _, image := range repoData.ImgList {
downloadImage := func(img *registry.ImgData) {
if askedTag != "" && img.Tag != askedTag {
- if parallel {
- errors <- nil
- }
+ errors <- nil
return
}
if img.Tag == "" {
logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
- if parallel {
- errors <- nil
- }
+ errors <- nil
return
}
@@ -169,9 +175,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
} else {
logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
}
- if parallel {
- errors <- nil
- }
+ errors <- nil
return
}
defer s.poolRemove("pull", "img:"+img.ID)
@@ -209,36 +213,27 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
if !success {
err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, repoInfo.CanonicalName, lastErr)
out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil))
- if parallel {
- errors <- err
- return
- }
+ errors <- err
+ return
}
out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
- if parallel {
- errors <- nil
- }
+ errors <- nil
}
- if parallel {
- go downloadImage(image)
- } else {
- downloadImage(image)
- }
+ go downloadImage(image)
}
- if parallel {
- var lastError error
- for i := 0; i < len(repoData.ImgList); i++ {
- if err := <-errors; err != nil {
- lastError = err
- }
- }
- if lastError != nil {
- return lastError
- }
+ var lastError error
+ for i := 0; i < len(repoData.ImgList); i++ {
+ if err := <-errors; err != nil {
+ lastError = err
+ }
}
+ if lastError != nil {
+ return lastError
+ }
+
for tag, id := range tagsList {
if askedTag != "" && tag != askedTag {
continue
@@ -257,7 +252,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
}
func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint string, token []string, sf *streamformatter.StreamFormatter) (bool, error) {
- history, err := r.GetRemoteHistory(imgID, endpoint, token)
+ history, err := r.GetRemoteHistory(imgID, endpoint)
if err != nil {
return false, err
}
@@ -286,7 +281,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
)
retries := 5
for j := 1; j <= retries; j++ {
- imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
+ imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint)
if err != nil && j == retries {
out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
return layersDownloaded, err
@@ -314,7 +309,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
}
out.Write(sf.FormatProgress(stringid.TruncateID(id), status, nil))
- layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
+ layer, err := r.GetRemoteImageLayer(img.ID, endpoint, int64(imgSize))
if uerr, ok := err.(*url.Error); ok {
err = uerr.Err
}
@@ -373,7 +368,7 @@ type downloadInfo struct {
err chan error
}
-func (s *TagStore) pullV2Repository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, parallel bool) error {
+func (s *TagStore) pullV2Repository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter) error {
endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
if err != nil {
if repoInfo.Index.Official {
@@ -397,14 +392,14 @@ func (s *TagStore) pullV2Repository(r *registry.Session, out io.Writer, repoInfo
return registry.ErrDoesNotExist
}
for _, t := range tags {
- if downloaded, err := s.pullV2Tag(r, out, endpoint, repoInfo, t, sf, parallel, auth); err != nil {
+ if downloaded, err := s.pullV2Tag(r, out, endpoint, repoInfo, t, sf, auth); err != nil {
return err
} else if downloaded {
layersDownloaded = true
}
}
} else {
- if downloaded, err := s.pullV2Tag(r, out, endpoint, repoInfo, tag, sf, parallel, auth); err != nil {
+ if downloaded, err := s.pullV2Tag(r, out, endpoint, repoInfo, tag, sf, auth); err != nil {
return err
} else if downloaded {
layersDownloaded = true
@@ -419,7 +414,7 @@ func (s *TagStore) pullV2Repository(r *registry.Session, out io.Writer, repoInfo
return nil
}
-func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, parallel bool, auth *registry.RequestAuthorization) (bool, error) {
+func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, auth *registry.RequestAuthorization) (bool, error) {
logrus.Debugf("Pulling tag from V2 registry: %q", tag)
manifestBytes, manifestDigest, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
@@ -531,16 +526,10 @@ func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *regis
return nil
}
- if parallel {
- downloads[i].err = make(chan error)
- go func(di *downloadInfo) {
- di.err <- downloadFunc(di)
- }(&downloads[i])
- } else {
- if err := downloadFunc(&downloads[i]); err != nil {
- return false, err
- }
- }
+ downloads[i].err = make(chan error)
+ go func(di *downloadInfo) {
+ di.err <- downloadFunc(di)
+ }(&downloads[i])
}
var tagUpdated bool
diff --git a/graph/push.go b/graph/push.go
index 1b33288d8f..817ef707fc 100644
--- a/graph/push.go
+++ b/graph/push.go
@@ -7,16 +7,18 @@ import (
"io"
"io/ioutil"
"os"
- "path"
+ "path/filepath"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/image"
+ "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progressreader"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/stringid"
+ "github.com/docker/docker/pkg/transport"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
@@ -29,7 +31,6 @@ type ImagePushConfig struct {
MetaHeaders map[string][]string
AuthConfig *cliconfig.AuthConfig
Tag string
- Json bool
OutStream io.Writer
}
@@ -141,7 +142,7 @@ func lookupImageOnEndpoint(wg *sync.WaitGroup, r *registry.Session, out io.Write
images chan imagePushData, imagesToPush chan string) {
defer wg.Done()
for image := range images {
- if err := r.LookupRemoteImage(image.id, image.endpoint, image.tokens); err != nil {
+ if err := r.LookupRemoteImage(image.id, image.endpoint); err != nil {
logrus.Errorf("Error in LookupRemoteImage: %s", err)
imagesToPush <- image.id
continue
@@ -199,7 +200,7 @@ func (s *TagStore) pushImageToEndpoint(endpoint string, out io.Writer, remoteNam
}
for _, tag := range tags[id] {
out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(id), endpoint+"repositories/"+remoteName+"/tags/"+tag))
- if err := r.PushRegistryTag(remoteName, id, tag, endpoint, repo.Tokens); err != nil {
+ if err := r.PushRegistryTag(remoteName, id, tag, endpoint); err != nil {
return err
}
}
@@ -212,7 +213,7 @@ func (s *TagStore) pushRepository(r *registry.Session, out io.Writer,
repoInfo *registry.RepositoryInfo, localRepo map[string]string,
tag string, sf *streamformatter.StreamFormatter) error {
logrus.Debugf("Local repo: %s", localRepo)
- out = utils.NewWriteFlusher(out)
+ out = ioutils.NewWriteFlusher(out)
imgList, tags, err := s.getImageList(localRepo, tag)
if err != nil {
return err
@@ -246,8 +247,8 @@ func (s *TagStore) pushRepository(r *registry.Session, out io.Writer,
}
func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep string, token []string, sf *streamformatter.StreamFormatter) (checksum string, err error) {
- out = utils.NewWriteFlusher(out)
- jsonRaw, err := ioutil.ReadFile(path.Join(s.graph.Root, imgID, "json"))
+ out = ioutils.NewWriteFlusher(out)
+ jsonRaw, err := ioutil.ReadFile(filepath.Join(s.graph.Root, imgID, "json"))
if err != nil {
return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
}
@@ -258,7 +259,7 @@ func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep strin
}
// Send the json
- if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
+ if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
if err == registry.ErrAlreadyExists {
out.Write(sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
return "", nil
@@ -284,14 +285,14 @@ func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep strin
NewLines: false,
ID: stringid.TruncateID(imgData.ID),
Action: "Pushing",
- }), ep, token, jsonRaw)
+ }), ep, jsonRaw)
if err != nil {
return "", err
}
imgData.Checksum = checksum
imgData.ChecksumPayload = checksumPayload
// Send the checksum
- if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
+ if err := r.PushImageChecksumRegistry(imgData, ep); err != nil {
return "", err
}
@@ -495,7 +496,7 @@ func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *
// FIXME: Allow to interrupt current push when new push of same image is done.
func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) error {
var (
- sf = streamformatter.NewStreamFormatter(imagePushConfig.Json)
+ sf = streamformatter.NewJSONStreamFormatter()
)
// Resolve the Repository name from fqn to RepositoryInfo
@@ -509,12 +510,18 @@ func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) erro
}
defer s.poolRemove("push", repoInfo.LocalName)
- endpoint, err := repoInfo.GetEndpoint()
+ endpoint, err := repoInfo.GetEndpoint(imagePushConfig.MetaHeaders)
if err != nil {
return err
}
-
- r, err := registry.NewSession(imagePushConfig.AuthConfig, registry.HTTPRequestFactory(imagePushConfig.MetaHeaders), endpoint, false)
+ // TODO(tiborvass): reuse client from endpoint?
+ // Adds Docker-specific headers as well as user-specified headers (metaHeaders)
+ tr := transport.NewTransport(
+ registry.NewTransport(registry.NoTimeout, endpoint.IsSecure),
+ registry.DockerHeaders(imagePushConfig.MetaHeaders)...,
+ )
+ client := registry.HTTPClient(tr)
+ r, err := registry.NewSession(client, imagePushConfig.AuthConfig, endpoint)
if err != nil {
return err
}
diff --git a/graph/tags.go b/graph/tags.go
index abffe2f562..166a3d733f 100644
--- a/graph/tags.go
+++ b/graph/tags.go
@@ -14,6 +14,7 @@ import (
"sync"
"github.com/docker/docker/daemon/events"
+ "github.com/docker/docker/graph/tags"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/stringid"
@@ -26,9 +27,8 @@ import (
const DEFAULTTAG = "latest"
var (
- //FIXME these 2 regexes also exist in registry/v2/regexp.go
- validTagName = regexp.MustCompile(`^[\w][\w.-]{0,127}$`)
- validDigest = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
+ //FIXME this regex also exists in registry/v2/regexp.go
+ validDigest = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
)
type TagStore struct {
@@ -248,12 +248,12 @@ func (store *TagStore) SetLoad(repoName, tag, imageName string, force bool, out
return err
}
if tag == "" {
- tag = DEFAULTTAG
+ tag = tags.DEFAULTTAG
}
if err := validateRepoName(repoName); err != nil {
return err
}
- if err := ValidateTagName(tag); err != nil {
+ if err := tags.ValidateTagName(tag); err != nil {
return err
}
if err := store.reload(); err != nil {
@@ -347,9 +347,12 @@ func (store *TagStore) GetImage(repoName, refOrID string) (*image.Image, error)
}
// If no matching tag is found, search through images for a matching image id
- for _, revision := range repo {
- if strings.HasPrefix(revision, refOrID) {
- return store.graph.Get(revision)
+ // iff it looks like a short ID or would look like a short ID
+ if stringid.IsShortID(stringid.TruncateID(refOrID)) {
+ for _, revision := range repo {
+ if strings.HasPrefix(revision, refOrID) {
+ return store.graph.Get(revision)
+ }
}
}
@@ -381,17 +384,6 @@ func validateRepoName(name string) error {
return nil
}
-// ValidateTagName validates the name of a tag
-func ValidateTagName(name string) error {
- if name == "" {
- return fmt.Errorf("tag name can't be empty")
- }
- if !validTagName.MatchString(name) {
- return fmt.Errorf("Illegal tag name (%s): only [A-Za-z0-9_.-] are allowed, minimum 1, maximum 128 in length", name)
- }
- return nil
-}
-
func validateDigest(dgst string) error {
if dgst == "" {
return errors.New("digest can't be empty")
diff --git a/graph/tags/tags.go b/graph/tags/tags.go
new file mode 100644
index 0000000000..1abb593db8
--- /dev/null
+++ b/graph/tags/tags.go
@@ -0,0 +1,24 @@
+package tags
+
+import (
+ "fmt"
+ "regexp"
+)
+
+const DEFAULTTAG = "latest"
+
+var (
+ //FIXME this regex also exists in registry/v2/regexp.go
+ validTagName = regexp.MustCompile(`^[\w][\w.-]{0,127}$`)
+)
+
+// ValidateTagName validates the name of a tag
+func ValidateTagName(name string) error {
+ if name == "" {
+ return fmt.Errorf("tag name can't be empty")
+ }
+ if !validTagName.MatchString(name) {
+ return fmt.Errorf("Illegal tag name (%s): only [A-Za-z0-9_.-] are allowed, minimum 1, maximum 128 in length", name)
+ }
+ return nil
+}
diff --git a/graph/tags/tags_unit_test.go b/graph/tags/tags_unit_test.go
new file mode 100644
index 0000000000..5114da1075
--- /dev/null
+++ b/graph/tags/tags_unit_test.go
@@ -0,0 +1,23 @@
+package tags
+
+import (
+ "testing"
+)
+
+func TestValidTagName(t *testing.T) {
+ validTags := []string{"9", "foo", "foo-test", "bar.baz.boo"}
+ for _, tag := range validTags {
+ if err := ValidateTagName(tag); err != nil {
+ t.Errorf("'%s' should've been a valid tag", tag)
+ }
+ }
+}
+
+func TestInvalidTagName(t *testing.T) {
+ validTags := []string{"-9", ".foo", "-test", ".", "-"}
+ for _, tag := range validTags {
+ if err := ValidateTagName(tag); err == nil {
+ t.Errorf("'%s' shouldn't have been a valid tag", tag)
+ }
+ }
+}
diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go
index 0482fa58e3..d1ddc67617 100644
--- a/graph/tags_unit_test.go
+++ b/graph/tags_unit_test.go
@@ -1,6 +1,7 @@
package graph
import (
+ "archive/tar"
"bytes"
"io"
"os"
@@ -12,7 +13,6 @@ import (
_ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests
"github.com/docker/docker/image"
"github.com/docker/docker/utils"
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
const (
@@ -181,24 +181,6 @@ func TestLookupImage(t *testing.T) {
}
}
-func TestValidTagName(t *testing.T) {
- validTags := []string{"9", "foo", "foo-test", "bar.baz.boo"}
- for _, tag := range validTags {
- if err := ValidateTagName(tag); err != nil {
- t.Errorf("'%s' should've been a valid tag", tag)
- }
- }
-}
-
-func TestInvalidTagName(t *testing.T) {
- validTags := []string{"-9", ".foo", "-test", ".", "-"}
- for _, tag := range validTags {
- if err := ValidateTagName(tag); err == nil {
- t.Errorf("'%s' shouldn't have been a valid tag", tag)
- }
- }
-}
-
func TestValidateDigest(t *testing.T) {
tests := []struct {
input string
diff --git a/hack/install.sh b/hack/install.sh
index e15565fc79..1984d5a5fd 100755
--- a/hack/install.sh
+++ b/hack/install.sh
@@ -49,11 +49,18 @@ do_install() {
;;
esac
- if command_exists docker || command_exists lxc-docker; then
+ if command_exists docker; then
cat >&2 <<-'EOF'
- Warning: "docker" or "lxc-docker" command appears to already exist.
- Please ensure that you do not already have docker installed.
- You may press Ctrl+C now to abort this process and rectify this situation.
+ Warning: the "docker" command appears to already exist on this system.
+
+ If you already have Docker installed, this script can cause trouble, which is
+	  why we're displaying this warning and providing the opportunity to cancel the
+ installation.
+
+ If you installed the current Docker package using this script and are using it
+ again to update Docker, you can safely ignore this message.
+
+ You may press Ctrl+C now to abort this script.
EOF
( set -x; sleep 20 )
fi
diff --git a/hack/make.sh b/hack/make.sh
index 31e08cd376..0ccbdabbe7 100755
--- a/hack/make.sh
+++ b/hack/make.sh
@@ -57,7 +57,6 @@ DEFAULT_BUNDLES=(
test-docker-py
dynbinary
- test-integration
cover
cross
@@ -94,6 +93,13 @@ if [ ! "$GOPATH" ]; then
exit 1
fi
+if [ "$DOCKER_EXPERIMENTAL" ]; then
+ echo >&2 '# WARNING! DOCKER_EXPERIMENTAL is set: building experimental features'
+ echo >&2
+ VERSION+="-experimental"
+ DOCKER_BUILDTAGS+=" experimental"
+fi
+
if [ -z "$DOCKER_CLIENTONLY" ]; then
DOCKER_BUILDTAGS+=" daemon"
fi
@@ -110,6 +116,15 @@ if \
DOCKER_BUILDTAGS+=' btrfs_noversion'
fi
+# test whether "libdevmapper.h" is new enough to support deferred remove
+# functionality.
+if \
+ command -v gcc &> /dev/null \
+	&& ! ( echo -e '#include <libdevmapper.h>\nint main() { dm_task_deferred_remove(NULL); }'| gcc -ldevmapper -xc - &> /dev/null ) \
+; then
+ DOCKER_BUILDTAGS+=' libdm_no_deferred_remove'
+fi
+
# Use these flags when compiling the tests and final binary
IAMSTATIC='true'
@@ -168,7 +183,12 @@ fi
# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
# You can use this to select certain tests to run, eg.
#
-# TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test
+# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit
+#
+# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want
+# to run certain tests on your local host, you should run with command:
+#
+# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli
#
go_test_dir() {
dir=$1
@@ -194,6 +214,7 @@ test_env() {
DEST="$DEST" \
DOCKER_EXECDRIVER="$DOCKER_EXECDRIVER" \
DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \
+ DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \
DOCKER_HOST="$DOCKER_HOST" \
GOPATH="$GOPATH" \
HOME="$DEST/fake-HOME" \
@@ -216,7 +237,6 @@ find_dirs() {
find . -not \( \
\( \
-path './vendor/*' \
- -o -path './integration/*' \
-o -path './integration-cli/*' \
-o -path './contrib/*' \
-o -path './pkg/mflag/example/*' \
diff --git a/hack/make/.build-deb/control b/hack/make/.build-deb/control
index 03caae8342..ac6541a73e 100644
--- a/hack/make/.build-deb/control
+++ b/hack/make/.build-deb/control
@@ -1,10 +1,10 @@
-Source: docker-core
+Source: docker-engine
Maintainer: Docker
Homepage: https://dockerproject.com
Vcs-Browser: https://github.com/docker/docker
Vcs-Git: git://github.com/docker/docker.git
-Package: docker-core
+Package: docker-engine
Architecture: linux-any
Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends}
Recommends: aufs-tools,
diff --git a/hack/make/.build-deb/docker-core.bash-completion b/hack/make/.build-deb/docker-engine.bash-completion
similarity index 100%
rename from hack/make/.build-deb/docker-core.bash-completion
rename to hack/make/.build-deb/docker-engine.bash-completion
diff --git a/hack/make/.build-deb/docker-core.docker.default b/hack/make/.build-deb/docker-engine.docker.default
similarity index 100%
rename from hack/make/.build-deb/docker-core.docker.default
rename to hack/make/.build-deb/docker-engine.docker.default
diff --git a/hack/make/.build-deb/docker-core.docker.init b/hack/make/.build-deb/docker-engine.docker.init
similarity index 100%
rename from hack/make/.build-deb/docker-core.docker.init
rename to hack/make/.build-deb/docker-engine.docker.init
diff --git a/hack/make/.build-deb/docker-core.docker.upstart b/hack/make/.build-deb/docker-engine.docker.upstart
similarity index 100%
rename from hack/make/.build-deb/docker-core.docker.upstart
rename to hack/make/.build-deb/docker-engine.docker.upstart
diff --git a/hack/make/.build-deb/docker-core.install b/hack/make/.build-deb/docker-engine.install
similarity index 56%
rename from hack/make/.build-deb/docker-core.install
rename to hack/make/.build-deb/docker-engine.install
index c3f4eb1465..a8857a96dc 100644
--- a/hack/make/.build-deb/docker-core.install
+++ b/hack/make/.build-deb/docker-engine.install
@@ -1,10 +1,11 @@
#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/
#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/
#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/
-contrib/*-integration usr/share/docker-core/contrib/
-contrib/check-config.sh usr/share/docker-core/contrib/
+contrib/*-integration usr/share/docker-engine/contrib/
+contrib/check-config.sh usr/share/docker-engine/contrib/
contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/
contrib/init/systemd/docker.service lib/systemd/system/
contrib/init/systemd/docker.socket lib/systemd/system/
-contrib/mk* usr/share/docker-core/contrib/
-contrib/nuke-graph-directory.sh usr/share/docker-core/contrib/
+contrib/mk* usr/share/docker-engine/contrib/
+contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/
+contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/
diff --git a/hack/make/.build-deb/docker-core.manpages b/hack/make/.build-deb/docker-engine.manpages
similarity index 100%
rename from hack/make/.build-deb/docker-core.manpages
rename to hack/make/.build-deb/docker-engine.manpages
diff --git a/hack/make/.build-deb/docker-core.postinst b/hack/make/.build-deb/docker-engine.postinst
similarity index 100%
rename from hack/make/.build-deb/docker-core.postinst
rename to hack/make/.build-deb/docker-engine.postinst
diff --git a/hack/make/.build-deb/docker-core.udev b/hack/make/.build-deb/docker-engine.udev
similarity index 100%
rename from hack/make/.build-deb/docker-core.udev
rename to hack/make/.build-deb/docker-engine.udev
diff --git a/hack/make/.build-deb/rules b/hack/make/.build-deb/rules
index 3369f4fc54..fe19b729ea 100755
--- a/hack/make/.build-deb/rules
+++ b/hack/make/.build-deb/rules
@@ -4,7 +4,7 @@ VERSION = $(shell cat VERSION)
override_dh_gencontrol:
# if we're on Ubuntu, we need to Recommends: apparmor
- echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-core.substvars
+ echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars
dh_gencontrol
override_dh_auto_build:
@@ -19,13 +19,13 @@ override_dh_strip:
# also, Go has lots of problems with stripping, so just don't
override_dh_auto_install:
- mkdir -p debian/docker-core/usr/bin
- cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary/docker)" debian/docker-core/usr/bin/docker
- mkdir -p debian/docker-core/usr/libexec/docker
- cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary/dockerinit)" debian/docker-core/usr/libexec/docker/dockerinit
+ mkdir -p debian/docker-engine/usr/bin
+ cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary/docker)" debian/docker-engine/usr/bin/docker
+ mkdir -p debian/docker-engine/usr/lib/docker
+ cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary/dockerinit)" debian/docker-engine/usr/lib/docker/dockerinit
override_dh_installinit:
- # use "docker" as our service name, not "docker-core"
+ # use "docker" as our service name, not "docker-engine"
dh_installinit --name=docker
override_dh_installudev:
diff --git a/hack/make/.build-rpm/docker-engine.spec b/hack/make/.build-rpm/docker-engine.spec
new file mode 100644
index 0000000000..8cb7c11184
--- /dev/null
+++ b/hack/make/.build-rpm/docker-engine.spec
@@ -0,0 +1,184 @@
+Name: docker-engine
+Version: %{_version}
+Release: %{_release}%{?dist}
+Summary: The open-source application container engine
+
+License: ASL 2.0
+Source: %{name}.tar.gz
+
+URL: https://dockerproject.com
+Vendor: Docker
+Packager: Docker
+
+# docker builds in a checksum of dockerinit into docker,
+# so stripping the binaries breaks docker
+%global __os_install_post %{_rpmconfigdir}/brp-compress
+%global debug_package %{nil}
+
+# is_systemd conditional
+%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7
+%global is_systemd 1
+%endif
+
+# required packages for build
+# most are already in the container (see contrib/builder/rpm/generate.sh)
+# only require systemd on those systems
+%if 0%{?is_systemd}
+BuildRequires: pkgconfig(systemd)
+Requires: systemd-units
+%else
+Requires(post): chkconfig
+Requires(preun): chkconfig
+# This is for /sbin/service
+Requires(preun): initscripts
+%endif
+
+# required packages on install
+Requires: /bin/sh
+Requires: iptables
+Requires: libc.so.6
+Requires: libcgroup
+Requires: libpthread.so.0
+Requires: libsqlite3.so.0
+Requires: tar
+Requires: xz
+%if 0%{?fedora} >= 21
+# Resolves: rhbz#1165615
+Requires: device-mapper-libs >= 1.02.90-1
+%endif
+
+# conflicting packages
+Conflicts: docker
+Conflicts: docker-io
+
+%description
+Docker is an open source project to pack, ship and run any application as a
+lightweight container
+
+Docker containers are both hardware-agnostic and platform-agnostic. This means
+they can run anywhere, from your laptop to the largest EC2 compute instance and
+everything in between - and they don't require you to use a particular
+language, framework or packaging system. That makes them great building blocks
+for deploying and scaling web apps, databases, and backend services without
+depending on a particular stack or provider.
+
+%prep
+%if 0%{?centos} <= 6
+%setup -n %{name}
+%else
+%autosetup -n %{name}
+%endif
+
+%build
+./hack/make.sh dynbinary
+# ./docs/man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here
+
+%check
+./bundles/%{_origversion}/dynbinary/docker -v
+
+%install
+# install binary
+install -d $RPM_BUILD_ROOT/%{_bindir}
+install -p -m 755 bundles/%{_origversion}/dynbinary/docker-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker
+
+# install dockerinit
+install -d $RPM_BUILD_ROOT/%{_libexecdir}/docker
+install -p -m 755 bundles/%{_origversion}/dynbinary/dockerinit-%{_origversion} $RPM_BUILD_ROOT/%{_libexecdir}/docker/dockerinit
+
+# install udev rules
+install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d
+install -p -m 755 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules
+
+# add init scripts
+install -d $RPM_BUILD_ROOT/etc/sysconfig
+install -d $RPM_BUILD_ROOT/%{_initddir}
+
+
+%if 0%{?is_systemd}
+install -d $RPM_BUILD_ROOT/%{_unitdir}
+install -p -m 644 contrib/init/systemd/docker.service $RPM_BUILD_ROOT/%{_unitdir}/docker.service
+install -p -m 644 contrib/init/systemd/docker.socket $RPM_BUILD_ROOT/%{_unitdir}/docker.socket
+%endif
+
+install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker
+install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker
+
+# add bash completions
+install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions
+install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions
+install -d $RPM_BUILD_ROOT/usr/share/fish/completions
+install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker
+install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker
+install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/completions/docker.fish
+
+# install manpages
+install -d %{buildroot}%{_mandir}/man1
+install -p -m 644 docs/man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1
+install -d %{buildroot}%{_mandir}/man5
+install -p -m 644 docs/man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5
+
+# add vimfiles
+install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc
+install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect
+install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax
+install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt
+install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim
+install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim
+
+# add nano
+install -d $RPM_BUILD_ROOT/usr/share/nano
+install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc
+
+# list files owned by the package here
+%files
+/%{_bindir}/docker
+/%{_libexecdir}/docker/dockerinit
+/%{_sysconfdir}/udev/rules.d/80-docker.rules
+%if 0%{?is_systemd}
+/%{_unitdir}/docker.service
+/%{_unitdir}/docker.socket
+%endif
+/etc/sysconfig/docker
+/%{_initddir}/docker
+/usr/share/bash-completion/completions/docker
+/usr/share/zsh/vendor-completions/_docker
+/usr/share/fish/completions/docker.fish
+%doc
+/%{_mandir}/man1/*
+/%{_mandir}/man5/*
+/usr/share/vim/vimfiles/doc/dockerfile.txt
+/usr/share/vim/vimfiles/ftdetect/dockerfile.vim
+/usr/share/vim/vimfiles/syntax/dockerfile.vim
+/usr/share/nano/Dockerfile.nanorc
+
+%post
+%if 0%{?is_systemd}
+%systemd_post docker
+%else
+# This adds the proper /etc/rc*.d links for the script
+/sbin/chkconfig --add docker
+%endif
+if ! getent group docker > /dev/null; then
+ groupadd --system docker
+fi
+
+%preun
+%if 0%{?is_systemd}
+%systemd_preun docker
+%else
+if [ $1 -eq 0 ] ; then
+ /sbin/service docker stop >/dev/null 2>&1
+ /sbin/chkconfig --del docker
+fi
+%endif
+
+%postun
+%if 0%{?is_systemd}
+%systemd_postun_with_restart docker
+%else
+if [ "$1" -ge "1" ] ; then
+ /sbin/service docker condrestart >/dev/null 2>&1 || :
+fi
+%endif
+
+%changelog
diff --git a/hack/make/.ensure-frozen-images b/hack/make/.ensure-frozen-images
index 379f738495..deded80e76 100644
--- a/hack/make/.ensure-frozen-images
+++ b/hack/make/.ensure-frozen-images
@@ -5,6 +5,7 @@ set -e
images=(
busybox:latest
hello-world:frozen
+ jess/unshare:latest
)
if ! docker inspect "${images[@]}" &> /dev/null; then
diff --git a/hack/make/.integration-daemon-start b/hack/make/.integration-daemon-start
index 57fd525028..eecf682efa 100644
--- a/hack/make/.integration-daemon-start
+++ b/hack/make/.integration-daemon-start
@@ -14,8 +14,30 @@ exec 41>&1 42>&2
export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs}
export DOCKER_EXECDRIVER=${DOCKER_EXECDRIVER:-native}
+export DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true}
+
+# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G"
+storage_params=""
+if [ -n "$DOCKER_STORAGE_OPTS" ]; then
+ IFS=','
+ for i in ${DOCKER_STORAGE_OPTS}; do
+ storage_params="--storage-opt $i $storage_params"
+ done
+ unset IFS
+fi
if [ -z "$DOCKER_TEST_HOST" ]; then
+ # Start apparmor if it is enabled
+ if [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then
+ # reset container variable so apparmor profile is applied to process
+ # see https://github.com/docker/libcontainer/blob/master/apparmor/apparmor.go#L16
+ export container=""
+ (
+ set -x
+ /etc/init.d/apparmor start
+ )
+ fi
+
export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one
( set -x; exec \
docker --daemon --debug \
@@ -23,6 +45,8 @@ if [ -z "$DOCKER_TEST_HOST" ]; then
--storage-driver "$DOCKER_GRAPHDRIVER" \
--exec-driver "$DOCKER_EXECDRIVER" \
--pidfile "$DEST/docker.pid" \
+ --userland-proxy="$DOCKER_USERLANDPROXY" \
+ $storage_params \
&> "$DEST/docker.log"
) &
trap "source '${MAKEDIR}/.integration-daemon-stop'" EXIT # make sure that if the script exits unexpectedly, we stop this daemon we just started
diff --git a/hack/make/.integration-daemon-stop b/hack/make/.integration-daemon-stop
index 6e1dc844de..364490bdef 100644
--- a/hack/make/.integration-daemon-stop
+++ b/hack/make/.integration-daemon-stop
@@ -9,3 +9,13 @@ for pidFile in $(find "$DEST" -name docker.pid); do
echo >&2 "warning: PID $pid from $pidFile had a nonzero exit code"
fi
done
+
+if [ -z "$DOCKER_TEST_HOST" ]; then
+ # Stop apparmor if it is enabled
+ if [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then
+ (
+ set -x
+ /etc/init.d/apparmor stop
+ )
+ fi
+fi
diff --git a/hack/make/binary b/hack/make/binary
index d3ec2939c0..7b2af5c849 100644
--- a/hack/make/binary
+++ b/hack/make/binary
@@ -13,6 +13,7 @@ fi
source "${MAKEDIR}/.go-autogen"
+echo "Building: $DEST/$BINARY_FULLNAME"
go build \
-o "$DEST/$BINARY_FULLNAME" \
"${BUILDFLAGS[@]}" \
diff --git a/hack/make/build-deb b/hack/make/build-deb
index a5a6d43870..36e11777c3 100644
--- a/hack/make/build-deb
+++ b/hack/make/build-deb
@@ -9,7 +9,8 @@ DEST=$1
# TODO consider using frozen images for the dockercore/builder-deb tags
- debVersion="${VERSION//-/'~'}"
+ tilde='~' # ouch Bash 4.2 vs 4.3, you keel me
+ debVersion="${VERSION//-/$tilde}" # using \~ or '~' here works in 4.3, but not 4.2; just ~ causes $HOME to be inserted, hence the $tilde
# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better
if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
gitUnix="$(git log -1 --pretty='%at')"
@@ -56,15 +57,11 @@ DEST=$1
RUN { echo '$debSource (${debVersion}-0~${suite}) $suite; urgency=low'; echo; echo ' * Version: $VERSION'; echo; echo " -- $debMaintainer $debDate"; } > debian/changelog && cat >&2 debian/changelog
RUN dpkg-buildpackage -uc -us
EOF
- cp -a "$DEST/$version/Dockerfile.build" . # can't use $DEST because it's in .dockerignore...
tempImage="docker-temp/build-deb:$version"
- ( set -x && docker build -t "$tempImage" -f Dockerfile.build . )
+ ( set -x && docker build -t "$tempImage" -f "$DEST/$version/Dockerfile.build" . )
docker run --rm "$tempImage" bash -c 'cd .. && tar -c *_*' | tar -xvC "$DEST/$version"
docker rmi "$tempImage"
done
- # clean up after ourselves
- rm -f Dockerfile.build
-
source "${MAKEDIR}/.integration-daemon-stop"
) 2>&1 | tee -a "$DEST/test.log"
diff --git a/hack/make/build-rpm b/hack/make/build-rpm
new file mode 100644
index 0000000000..0f3ff6d00e
--- /dev/null
+++ b/hack/make/build-rpm
@@ -0,0 +1,73 @@
+#!/bin/bash
+set -e
+
+DEST=$1
+
+# subshell so that we can export PATH without breaking other things
+(
+ source "$(dirname "$BASH_SOURCE")/.integration-daemon-start"
+
+ # TODO consider using frozen images for the dockercore/builder-rpm tags
+
+ rpmName=docker-engine
+ rpmVersion="${VERSION%%-*}"
+ rpmRelease=1
+
+ # rpmRelease versioning is as follows
+ # Docker 1.7.0: version=1.7.0, release=1
+ # Docker 1.7.0-rc1: version=1.7.0, release=0.1.rc1
+ # Docker 1.7.0-dev nightly: version=1.7.0, release=0.0.YYYYMMDD.HHMMSS.gitHASH
+
+ # if we have a "-rc*" suffix, set appropriate release
+ if [[ "$VERSION" == *-rc* ]]; then
+ rcVersion=${VERSION#*-rc}
+ rpmRelease="0.${rcVersion}.rc${rcVersion}"
+ fi
+
+ # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better
+ if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
+ gitUnix="$(git log -1 --pretty='%at')"
+ gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')"
+ gitCommit="$(git log -1 --pretty='%h')"
+ gitVersion="${gitDate}.git${gitCommit}"
+ # gitVersion is now something like '20150128.112847.17e840a'
+ rpmRelease="0.0.$gitVersion"
+ fi
+
+ rpmPackager="$(awk -F ': ' '$1 == "Packager" { print $2; exit }' hack/make/.build-rpm/${rpmName}.spec)"
+ rpmDate="$(date +'%a %b %d %Y')"
+
+ # if go-md2man is available, pre-generate the man pages
+ ./docs/man/md2man-all.sh -q || true
+ # TODO decide if it's worth getting go-md2man in _each_ builder environment to avoid this
+
+ # TODO add a configurable knob for _which_ rpms to build so we don't have to modify the file or build all of them every time we need to test
+ for dir in contrib/builder/rpm/*/; do
+ version="$(basename "$dir")"
+ suite="${version##*-}"
+
+ image="dockercore/builder-rpm:$version"
+ if ! docker inspect "$image" &> /dev/null; then
+ ( set -x && docker build -t "$image" "$dir" )
+ fi
+
+ mkdir -p "$DEST/$version"
+ cat > "$DEST/$version/Dockerfile.build" <<-EOF
+ FROM $image
+ COPY . /usr/src/${rpmName}
+ RUN mkdir -p /root/rpmbuild/SOURCES
+ WORKDIR /root/rpmbuild
+ RUN ln -sfv /usr/src/${rpmName}/hack/make/.build-rpm SPECS
+ RUN tar -cz -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar.gz ${rpmName}
+ WORKDIR /root/rpmbuild/SPECS
+ RUN { echo '* $rpmDate $rpmPackager $rpmVersion-$rpmRelease'; echo '* Version: $VERSION'; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec
+ RUN rpmbuild -ba --define '_release $rpmRelease' --define '_version $rpmVersion' --define '_origversion $VERSION' ${rpmName}.spec
+ EOF
+ tempImage="docker-temp/build-rpm:$version"
+ ( set -x && docker build -t "$tempImage" -f $DEST/$version/Dockerfile.build . )
+ docker run --rm "$tempImage" bash -c 'cd /root/rpmbuild && tar -c *RPMS' | tar -xvC "$DEST/$version"
+ docker rmi "$tempImage"
+ done
+
+ source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop"
+) 2>&1 | tee -a $DEST/test.log
diff --git a/hack/make/test-integration b/hack/make/test-integration
deleted file mode 100644
index 206e37abf0..0000000000
--- a/hack/make/test-integration
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-set -e
-
-DEST=$1
-
-INIT=$DEST/../dynbinary/dockerinit-$VERSION
-[ -x "$INIT" ] || {
- source "${MAKEDIR}/.dockerinit"
- INIT="$DEST/dockerinit"
-}
-export TEST_DOCKERINIT_PATH="$INIT"
-
-bundle_test_integration() {
- LDFLAGS="
- $LDFLAGS
- -X $DOCKER_PKG/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\"
- " go_test_dir ./integration \
- "-coverpkg $(find_dirs '*.go' | sed 's,^\.,'$DOCKER_PKG',g' | paste -d, -s)"
-}
-
-# this "grep" hides some really irritating warnings that "go test -coverpkg"
-# spews when it is given packages that aren't used
-bundle_test_integration 2>&1 \
- | grep --line-buffered -v '^warning: no packages being tested depend on ' \
- | tee -a "$DEST/test.log"
diff --git a/hack/release.sh b/hack/release.sh
index 04772546fd..1d3c9c372c 100755
--- a/hack/release.sh
+++ b/hack/release.sh
@@ -54,7 +54,7 @@ RELEASE_BUNDLES=(
if [ "$1" != '--release-regardless-of-test-failure' ]; then
RELEASE_BUNDLES=(
- test-unit test-integration
+ test-unit
"${RELEASE_BUNDLES[@]}"
test-integration-cli
)
diff --git a/hack/vendor.sh b/hack/vendor.sh
index 8fed058522..63a2f460f9 100755
--- a/hack/vendor.sh
+++ b/hack/vendor.sh
@@ -36,39 +36,28 @@ clone() {
echo -n 'rm VCS, '
( cd $target_dir && rm -rf .{git,hg} )
+ echo -n 'rm vendor, '
+ ( cd $target_dir && rm -rf vendor Godeps/_workspace )
+
echo done
}
-clone git github.com/kr/pty 05017fcccf
-
+# the following lines are in sorted order, FYI
+clone git github.com/Sirupsen/logrus v0.7.3 # logrus is a common dependency among multiple deps
+clone git github.com/docker/libtrust 230dfd18c232
+clone git github.com/go-check/check 64131543e7896d5bcc6bd5a76287eb75ea96c673
clone git github.com/gorilla/context 14f550f51a
-
clone git github.com/gorilla/mux e444e69cbd
-
-clone git github.com/tchap/go-patricia v1.0.1
-
+clone git github.com/kr/pty 5cf931ef8f
+clone git github.com/mistifyio/go-zfs v2.1.0
+clone git github.com/tchap/go-patricia v2.1.0
clone hg code.google.com/p/go.net 84a4013f96e0
-
clone hg code.google.com/p/gosqlite 74691fb6f837
-clone git github.com/docker/libtrust 230dfd18c232
-
-clone git github.com/Sirupsen/logrus v0.7.2
-
-clone git github.com/go-fsnotify/fsnotify v1.0.4
-
-clone git github.com/go-check/check 64131543e7896d5bcc6bd5a76287eb75ea96c673
-
-# get Go tip's archive/tar, for xattr support and improved performance
-# TODO after Go 1.4 drops, bump our minimum supported version and drop this vendored dep
-if [ "$1" = '--go' ]; then
- # Go takes forever and a half to clone, so we only redownload it when explicitly requested via the "--go" flag to this script.
- clone hg code.google.com/p/go 1b17b3426e3c
- mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar
- rm -rf src/code.google.com/p/go
- mkdir -p src/code.google.com/p/go/src/pkg/archive
- mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar
-fi
+#get libnetwork packages
+clone git github.com/docker/libnetwork 67438080724b17b641b411322822c00d0d3c3201
+clone git github.com/vishvananda/netns 008d17ae001344769b031375bdb38a86219154c6
+clone git github.com/vishvananda/netlink 8eb64238879fed52fd51c5b30ad20b928fb4c36c
# get distribution packages
clone git github.com/docker/distribution d957768537c5af40e4f4cd96871f7b2bde9e2923
@@ -80,8 +69,8 @@ mv tmp-digest src/github.com/docker/distribution/digest
mkdir -p src/github.com/docker/distribution/registry
mv tmp-api src/github.com/docker/distribution/registry/api
-clone git github.com/docker/libcontainer bd8ec36106086f72b66e1be85a81202b93503e44
-# see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file)
-rm -rf src/github.com/docker/libcontainer/vendor
-eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli' | grep -v 'github.com/Sirupsen/logrus')"
-# we exclude "github.com/codegangsta/cli" here because it's only needed for "nsinit", which Docker doesn't include
+clone git github.com/docker/libcontainer a37b2a4f152e2a1c9de596f54c051cb889de0691
+# libcontainer deps (see src/github.com/docker/libcontainer/update-vendor.sh)
+clone git github.com/coreos/go-systemd v2
+clone git github.com/godbus/dbus v2
+clone git github.com/syndtr/gocapability 66ef2aa7a23ba682594e2b6f74cf40c0692b49fb
diff --git a/image/image.go b/image/image.go
index a34d2b9408..4e37ebc42d 100644
--- a/image/image.go
+++ b/image/image.go
@@ -5,7 +5,7 @@ import (
"fmt"
"io/ioutil"
"os"
- "path"
+ "path/filepath"
"regexp"
"strconv"
"time"
@@ -19,6 +19,8 @@ import (
// For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk
const MaxImageDepth = 127
+var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+
type Image struct {
ID string `json:"id"`
Parent string `json:"parent,omitempty"`
@@ -55,7 +57,7 @@ func LoadImage(root string) (*Image, error) {
return nil, err
}
- if buf, err := ioutil.ReadFile(path.Join(root, "layersize")); err != nil {
+ if buf, err := ioutil.ReadFile(filepath.Join(root, "layersize")); err != nil {
if !os.IsNotExist(err) {
return nil, err
}
@@ -107,21 +109,21 @@ func (img *Image) SetGraph(graph Graph) {
// SaveSize stores the current `size` value of `img` in the directory `root`.
func (img *Image) SaveSize(root string) error {
- if err := ioutil.WriteFile(path.Join(root, "layersize"), []byte(strconv.Itoa(int(img.Size))), 0600); err != nil {
+ if err := ioutil.WriteFile(filepath.Join(root, "layersize"), []byte(strconv.Itoa(int(img.Size))), 0600); err != nil {
return fmt.Errorf("Error storing image size in %s/layersize: %s", root, err)
}
return nil
}
func (img *Image) SaveCheckSum(root, checksum string) error {
- if err := ioutil.WriteFile(path.Join(root, "checksum"), []byte(checksum), 0600); err != nil {
+ if err := ioutil.WriteFile(filepath.Join(root, "checksum"), []byte(checksum), 0600); err != nil {
return fmt.Errorf("Error storing checksum in %s/checksum: %s", root, err)
}
return nil
}
func (img *Image) GetCheckSum(root string) (string, error) {
- cs, err := ioutil.ReadFile(path.Join(root, "checksum"))
+ cs, err := ioutil.ReadFile(filepath.Join(root, "checksum"))
if err != nil {
if os.IsNotExist(err) {
return "", nil
@@ -132,7 +134,7 @@ func (img *Image) GetCheckSum(root string) (string, error) {
}
func jsonPath(root string) string {
- return path.Join(root, "json")
+ return filepath.Join(root, "json")
}
func (img *Image) RawJson() ([]byte, error) {
@@ -266,7 +268,6 @@ func NewImgJSON(src []byte) (*Image, error) {
// Check wheather id is a valid image ID or not
func ValidateID(id string) error {
- validHex := regexp.MustCompile(`^([a-f0-9]{64})$`)
if ok := validHex.MatchString(id); !ok {
return fmt.Errorf("image ID '%s' is invalid", id)
}
diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go
index 1fec3912e6..363d279120 100644
--- a/integration-cli/docker_api_containers_test.go
+++ b/integration-cli/docker_api_containers_test.go
@@ -1,17 +1,20 @@
package main
import (
+ "archive/tar"
"bytes"
"encoding/json"
"io"
"net/http"
+ "net/http/httputil"
+ "os"
"os/exec"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/stringid"
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+ "github.com/docker/docker/runconfig"
"github.com/go-check/check"
)
@@ -163,7 +166,7 @@ func (s *DockerSuite) TestContainerApiStartDupVolumeBinds(c *check.C) {
c.Assert(status, check.Equals, http.StatusInternalServerError)
c.Assert(err, check.IsNil)
- if !strings.Contains(string(body), "Duplicate volume") {
+ if !strings.Contains(string(body), "Duplicate bind") {
c.Fatalf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)
}
}
@@ -207,49 +210,6 @@ func (s *DockerSuite) TestContainerApiStartVolumesFrom(c *check.C) {
}
}
-// Ensure that volumes-from has priority over binds/anything else
-// This is pretty much the same as TestRunApplyVolumesFromBeforeVolumes, except with passing the VolumesFrom and the bind on start
-func (s *DockerSuite) TestVolumesFromHasPriority(c *check.C) {
- volName := "voltst2"
- volPath := "/tmp"
-
- if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", volName, "-v", volPath, "busybox")); err != nil {
- c.Fatal(out, err)
- }
-
- name := "testing"
- config := map[string]interface{}{
- "Image": "busybox",
- "Volumes": map[string]struct{}{volPath: {}},
- }
-
- status, _, err := sockRequest("POST", "/containers/create?name="+name, config)
- c.Assert(status, check.Equals, http.StatusCreated)
- c.Assert(err, check.IsNil)
-
- bindPath := randomUnixTmpDirPath("test")
- config = map[string]interface{}{
- "VolumesFrom": []string{volName},
- "Binds": []string{bindPath + ":/tmp"},
- }
- status, _, err = sockRequest("POST", "/containers/"+name+"/start", config)
- c.Assert(status, check.Equals, http.StatusNoContent)
- c.Assert(err, check.IsNil)
-
- pth, err := inspectFieldMap(name, "Volumes", volPath)
- if err != nil {
- c.Fatal(err)
- }
- pth2, err := inspectFieldMap(volName, "Volumes", volPath)
- if err != nil {
- c.Fatal(err)
- }
-
- if pth != pth2 {
- c.Fatalf("expected volume host path to be %s, got %s", pth, pth2)
- }
-}
-
func (s *DockerSuite) TestGetContainerStats(c *check.C) {
var (
name = "statscontainer"
@@ -674,46 +634,133 @@ func (s *DockerSuite) TestContainerApiCreate(c *check.C) {
}
func (s *DockerSuite) TestContainerApiCreateWithHostName(c *check.C) {
- var hostName = "test-host"
+ hostName := "test-host"
config := map[string]interface{}{
"Image": "busybox",
"Hostname": hostName,
}
- _, b, err := sockRequest("POST", "/containers/create", config)
- if err != nil && !strings.Contains(err.Error(), "200 OK: 201") {
- c.Fatal(err)
- }
- type createResp struct {
- Id string
- }
- var container createResp
- if err := json.Unmarshal(b, &container); err != nil {
+ status, body, err := sockRequest("POST", "/containers/create", config)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusCreated)
+
+ var container types.ContainerCreateResponse
+ if err := json.Unmarshal(body, &container); err != nil {
c.Fatal(err)
}
- var id = container.Id
+ status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusOK)
- _, bodyGet, err := sockRequest("GET", "/containers/"+id+"/json", nil)
-
- type configLocal struct {
- Hostname string
- }
- type getResponse struct {
- Id string
- Config configLocal
- }
-
- var containerInfo getResponse
- if err := json.Unmarshal(bodyGet, &containerInfo); err != nil {
+ var containerJSON types.ContainerJSON
+ if err := json.Unmarshal(body, &containerJSON); err != nil {
c.Fatal(err)
}
- var hostNameActual = containerInfo.Config.Hostname
- if hostNameActual != "test-host" {
- c.Fatalf("Mismatched Hostname, Expected %v, Actual: %v ", hostName, hostNameActual)
+
+ if containerJSON.Config.Hostname != hostName {
+ c.Fatalf("Mismatched Hostname, Expected %s, Actual: %s ", hostName, containerJSON.Config.Hostname)
}
}
+func (s *DockerSuite) TestContainerApiCreateWithDomainName(c *check.C) {
+ domainName := "test-domain"
+ config := map[string]interface{}{
+ "Image": "busybox",
+ "Domainname": domainName,
+ }
+
+ status, body, err := sockRequest("POST", "/containers/create", config)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusCreated)
+
+ var container types.ContainerCreateResponse
+ if err := json.Unmarshal(body, &container); err != nil {
+ c.Fatal(err)
+ }
+
+ status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusOK)
+
+ var containerJSON types.ContainerJSON
+ if err := json.Unmarshal(body, &containerJSON); err != nil {
+ c.Fatal(err)
+ }
+
+ if containerJSON.Config.Domainname != domainName {
+ c.Fatalf("Mismatched Domainname, Expected %s, Actual: %s ", domainName, containerJSON.Config.Domainname)
+ }
+}
+
+func (s *DockerSuite) TestContainerApiCreateNetworkMode(c *check.C) {
+ UtilCreateNetworkMode(c, "host")
+ UtilCreateNetworkMode(c, "bridge")
+ UtilCreateNetworkMode(c, "container:web1")
+}
+
+func UtilCreateNetworkMode(c *check.C, networkMode string) {
+ config := map[string]interface{}{
+ "Image": "busybox",
+ "HostConfig": map[string]interface{}{"NetworkMode": networkMode},
+ }
+
+ status, body, err := sockRequest("POST", "/containers/create", config)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusCreated)
+
+ var container types.ContainerCreateResponse
+ if err := json.Unmarshal(body, &container); err != nil {
+ c.Fatal(err)
+ }
+
+ status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusOK)
+
+ var containerJSON types.ContainerJSON
+ if err := json.Unmarshal(body, &containerJSON); err != nil {
+ c.Fatal(err)
+ }
+
+ if containerJSON.HostConfig.NetworkMode != runconfig.NetworkMode(networkMode) {
+ c.Fatalf("Mismatched NetworkMode, Expected %s, Actual: %s ", networkMode, containerJSON.HostConfig.NetworkMode)
+ }
+}
+
+func (s *DockerSuite) TestContainerApiCreateWithCpuSharesCpuset(c *check.C) {
+ config := map[string]interface{}{
+ "Image": "busybox",
+ "CpuShares": 512,
+ "CpusetCpus": "0,1",
+ }
+
+ status, body, err := sockRequest("POST", "/containers/create", config)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusCreated)
+
+ var container types.ContainerCreateResponse
+ if err := json.Unmarshal(body, &container); err != nil {
+ c.Fatal(err)
+ }
+
+ status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusOK)
+
+ var containerJson types.ContainerJSON
+
+ c.Assert(json.Unmarshal(body, &containerJson), check.IsNil)
+
+ out, err := inspectField(containerJson.Id, "HostConfig.CpuShares")
+ c.Assert(err, check.IsNil)
+ c.Assert(out, check.Equals, "512")
+
+ outCpuset, errCpuset := inspectField(containerJson.Id, "HostConfig.CpusetCpus")
+ c.Assert(errCpuset, check.IsNil, check.Commentf("Output: %s", outCpuset))
+ c.Assert(outCpuset, check.Equals, "0,1")
+}
+
func (s *DockerSuite) TestContainerApiVerifyHeader(c *check.C) {
config := map[string]interface{}{
"Image": "busybox",
@@ -796,6 +843,17 @@ func (s *DockerSuite) TestContainerApiPostCreateNull(c *check.C) {
if out != "" {
c.Fatalf("expected empty string, got %q", out)
}
+
+ outMemory, errMemory := inspectField(container.Id, "HostConfig.Memory")
+ c.Assert(outMemory, check.Equals, "0")
+ if errMemory != nil {
+ c.Fatal(errMemory, outMemory)
+ }
+ outMemorySwap, errMemorySwap := inspectField(container.Id, "HostConfig.MemorySwap")
+ c.Assert(outMemorySwap, check.Equals, "0")
+ if errMemorySwap != nil {
+ c.Fatal(errMemorySwap, outMemorySwap)
+ }
}
func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) {
@@ -858,3 +916,383 @@ func (s *DockerSuite) TestContainerApiRename(c *check.C) {
c.Fatalf("Failed to rename container, expected %v, got %v. Container rename API failed", newName, name)
}
}
+
+func (s *DockerSuite) TestContainerApiKill(c *check.C) {
+ name := "test-api-kill"
+ runCmd := exec.Command(dockerBinary, "run", "-di", "--name", name, "busybox", "top")
+ out, _, err := runCommandWithOutput(runCmd)
+ if err != nil {
+ c.Fatalf("Error on container creation: %v, output: %q", err, out)
+ }
+
+ status, _, err := sockRequest("POST", "/containers/"+name+"/kill", nil)
+ c.Assert(status, check.Equals, http.StatusNoContent)
+ c.Assert(err, check.IsNil)
+
+ state, err := inspectField(name, "State.Running")
+ if err != nil {
+ c.Fatal(err)
+ }
+ if state != "false" {
+ c.Fatalf("got wrong State from container %s: %q", name, state)
+ }
+}
+
+func (s *DockerSuite) TestContainerApiRestart(c *check.C) {
+ name := "test-api-restart"
+ runCmd := exec.Command(dockerBinary, "run", "-di", "--name", name, "busybox", "top")
+ out, _, err := runCommandWithOutput(runCmd)
+ if err != nil {
+ c.Fatalf("Error on container creation: %v, output: %q", err, out)
+ }
+
+ status, _, err := sockRequest("POST", "/containers/"+name+"/restart?t=1", nil)
+ c.Assert(status, check.Equals, http.StatusNoContent)
+ c.Assert(err, check.IsNil)
+
+ if err := waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 5); err != nil {
+ c.Fatal(err)
+ }
+}
+
+func (s *DockerSuite) TestContainerApiRestartNotimeoutParam(c *check.C) {
+ name := "test-api-restart-no-timeout-param"
+ runCmd := exec.Command(dockerBinary, "run", "-di", "--name", name, "busybox", "top")
+ out, _, err := runCommandWithOutput(runCmd)
+ if err != nil {
+ c.Fatalf("Error on container creation: %v, output: %q", err, out)
+ }
+ id := strings.TrimSpace(out)
+ c.Assert(waitRun(id), check.IsNil)
+
+ status, _, err := sockRequest("POST", "/containers/"+name+"/restart", nil)
+ c.Assert(status, check.Equals, http.StatusNoContent)
+ c.Assert(err, check.IsNil)
+
+ if err := waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 5); err != nil {
+ c.Fatal(err)
+ }
+}
+
+func (s *DockerSuite) TestContainerApiStart(c *check.C) {
+ name := "testing-start"
+ config := map[string]interface{}{
+ "Image": "busybox",
+ "Cmd": []string{"/bin/sh", "-c", "/bin/top"},
+ "OpenStdin": true,
+ }
+
+ status, _, err := sockRequest("POST", "/containers/create?name="+name, config)
+ c.Assert(status, check.Equals, http.StatusCreated)
+ c.Assert(err, check.IsNil)
+
+ conf := make(map[string]interface{})
+ status, _, err = sockRequest("POST", "/containers/"+name+"/start", conf)
+ c.Assert(status, check.Equals, http.StatusNoContent)
+ c.Assert(err, check.IsNil)
+
+ // second call to start should give 304
+ status, _, err = sockRequest("POST", "/containers/"+name+"/start", conf)
+ c.Assert(status, check.Equals, http.StatusNotModified)
+ c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestContainerApiStop(c *check.C) {
+ name := "test-api-stop"
+ runCmd := exec.Command(dockerBinary, "run", "-di", "--name", name, "busybox", "top")
+ out, _, err := runCommandWithOutput(runCmd)
+ if err != nil {
+ c.Fatalf("Error on container creation: %v, output: %q", err, out)
+ }
+
+ status, _, err := sockRequest("POST", "/containers/"+name+"/stop?t=1", nil)
+ c.Assert(status, check.Equals, http.StatusNoContent)
+ c.Assert(err, check.IsNil)
+
+ if err := waitInspect(name, "{{ .State.Running }}", "false", 5); err != nil {
+ c.Fatal(err)
+ }
+
+	// second call to stop should give 304
+ status, _, err = sockRequest("POST", "/containers/"+name+"/stop?t=1", nil)
+ c.Assert(status, check.Equals, http.StatusNotModified)
+ c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestContainerApiWait(c *check.C) {
+ name := "test-api-wait"
+ runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sleep", "5")
+ out, _, err := runCommandWithOutput(runCmd)
+ if err != nil {
+ c.Fatalf("Error on container creation: %v, output: %q", err, out)
+ }
+
+ status, body, err := sockRequest("POST", "/containers/"+name+"/wait", nil)
+ c.Assert(status, check.Equals, http.StatusOK)
+ c.Assert(err, check.IsNil)
+
+ if err := waitInspect(name, "{{ .State.Running }}", "false", 5); err != nil {
+ c.Fatal(err)
+ }
+
+ var waitres types.ContainerWaitResponse
+ if err := json.Unmarshal(body, &waitres); err != nil {
+ c.Fatalf("unable to unmarshal response body: %v", err)
+ }
+
+ if waitres.StatusCode != 0 {
+ c.Fatalf("Expected wait response StatusCode to be 0, got %d", waitres.StatusCode)
+ }
+}
+
+func (s *DockerSuite) TestContainerApiCopy(c *check.C) {
+ name := "test-container-api-copy"
+ runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "touch", "/test.txt")
+ _, err := runCommand(runCmd)
+ c.Assert(err, check.IsNil)
+
+ postData := types.CopyConfig{
+ Resource: "/test.txt",
+ }
+
+ status, body, err := sockRequest("POST", "/containers/"+name+"/copy", postData)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusOK)
+
+ found := false
+ for tarReader := tar.NewReader(bytes.NewReader(body)); ; {
+ h, err := tarReader.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ c.Fatal(err)
+ }
+ if h.Name == "test.txt" {
+ found = true
+ break
+ }
+ }
+ c.Assert(found, check.Equals, true)
+}
+
+func (s *DockerSuite) TestContainerApiCopyResourcePathEmpty(c *check.C) {
+ name := "test-container-api-copy-resource-empty"
+ runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "touch", "/test.txt")
+ _, err := runCommand(runCmd)
+ c.Assert(err, check.IsNil)
+
+ postData := types.CopyConfig{
+ Resource: "",
+ }
+
+ status, body, err := sockRequest("POST", "/containers/"+name+"/copy", postData)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusInternalServerError)
+ c.Assert(string(body), check.Matches, "Path cannot be empty\n")
+}
+
+func (s *DockerSuite) TestContainerApiCopyResourcePathNotFound(c *check.C) {
+ name := "test-container-api-copy-resource-not-found"
+ runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox")
+ _, err := runCommand(runCmd)
+ c.Assert(err, check.IsNil)
+
+ postData := types.CopyConfig{
+ Resource: "/notexist",
+ }
+
+ status, body, err := sockRequest("POST", "/containers/"+name+"/copy", postData)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusInternalServerError)
+ c.Assert(string(body), check.Matches, "Could not find the file /notexist in container "+name+"\n")
+}
+
+func (s *DockerSuite) TestContainerApiCopyContainerNotFound(c *check.C) {
+ postData := types.CopyConfig{
+ Resource: "/something",
+ }
+
+ status, _, err := sockRequest("POST", "/containers/notexists/copy", postData)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusNotFound)
+}
+
+func (s *DockerSuite) TestContainerApiDelete(c *check.C) {
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+ out, _, err := runCommandWithOutput(runCmd)
+ c.Assert(err, check.IsNil)
+
+ id := strings.TrimSpace(out)
+ c.Assert(waitRun(id), check.IsNil)
+
+ stopCmd := exec.Command(dockerBinary, "stop", id)
+ _, err = runCommand(stopCmd)
+ c.Assert(err, check.IsNil)
+
+ status, _, err := sockRequest("DELETE", "/containers/"+id, nil)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusNoContent)
+}
+
+func (s *DockerSuite) TestContainerApiDeleteNotExist(c *check.C) {
+ status, body, err := sockRequest("DELETE", "/containers/doesnotexist", nil)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusNotFound)
+ c.Assert(string(body), check.Matches, "no such id: doesnotexist\n")
+}
+
+func (s *DockerSuite) TestContainerApiDeleteForce(c *check.C) {
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+ out, _, err := runCommandWithOutput(runCmd)
+ c.Assert(err, check.IsNil)
+
+ id := strings.TrimSpace(out)
+ c.Assert(waitRun(id), check.IsNil)
+
+ status, _, err := sockRequest("DELETE", "/containers/"+id+"?force=1", nil)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusNoContent)
+}
+
+func (s *DockerSuite) TestContainerApiDeleteRemoveLinks(c *check.C) {
+ runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "tlink1", "busybox", "top")
+ out, _, err := runCommandWithOutput(runCmd)
+ c.Assert(err, check.IsNil)
+
+ id := strings.TrimSpace(out)
+ c.Assert(waitRun(id), check.IsNil)
+
+ runCmd = exec.Command(dockerBinary, "run", "--link", "tlink1:tlink1", "--name", "tlink2", "-d", "busybox", "top")
+ out, _, err = runCommandWithOutput(runCmd)
+ c.Assert(err, check.IsNil)
+
+ id2 := strings.TrimSpace(out)
+ c.Assert(waitRun(id2), check.IsNil)
+
+ links, err := inspectFieldJSON(id2, "HostConfig.Links")
+ c.Assert(err, check.IsNil)
+
+ if links != "[\"/tlink1:/tlink2/tlink1\"]" {
+ c.Fatal("expected to have links between containers")
+ }
+
+ status, _, err := sockRequest("DELETE", "/containers/tlink2/tlink1?link=1", nil)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusNoContent)
+
+ linksPostRm, err := inspectFieldJSON(id2, "HostConfig.Links")
+ c.Assert(err, check.IsNil)
+
+ if linksPostRm != "null" {
+ c.Fatal("call to api deleteContainer links should have removed the specified links")
+ }
+}
+
+func (s *DockerSuite) TestContainerApiDeleteConflict(c *check.C) {
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+ out, _, err := runCommandWithOutput(runCmd)
+ c.Assert(err, check.IsNil)
+
+ id := strings.TrimSpace(out)
+ c.Assert(waitRun(id), check.IsNil)
+
+ status, _, err := sockRequest("DELETE", "/containers/"+id, nil)
+ c.Assert(status, check.Equals, http.StatusConflict)
+ c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestContainerApiDeleteRemoveVolume(c *check.C) {
+ testRequires(c, SameHostDaemon)
+
+ runCmd := exec.Command(dockerBinary, "run", "-d", "-v", "/testvolume", "busybox", "top")
+ out, _, err := runCommandWithOutput(runCmd)
+ c.Assert(err, check.IsNil)
+
+ id := strings.TrimSpace(out)
+ c.Assert(waitRun(id), check.IsNil)
+
+ vol, err := inspectFieldMap(id, "Volumes", "/testvolume")
+ c.Assert(err, check.IsNil)
+
+ _, err = os.Stat(vol)
+ c.Assert(err, check.IsNil)
+
+ status, _, err := sockRequest("DELETE", "/containers/"+id+"?v=1&force=1", nil)
+ c.Assert(status, check.Equals, http.StatusNoContent)
+ c.Assert(err, check.IsNil)
+
+ if _, err := os.Stat(vol); !os.IsNotExist(err) {
+ c.Fatalf("expected to get ErrNotExist error, got %v", err)
+ }
+}
+
+// Regression test for https://github.com/docker/docker/issues/6231
+func (s *DockerSuite) TestContainersApiChunkedEncoding(c *check.C) {
+ out, _ := dockerCmd(c, "create", "-v", "/foo", "busybox", "true")
+ id := strings.TrimSpace(out)
+
+ conn, err := sockConn(time.Duration(10 * time.Second))
+ if err != nil {
+ c.Fatal(err)
+ }
+ client := httputil.NewClientConn(conn, nil)
+ defer client.Close()
+
+ bindCfg := strings.NewReader(`{"Binds": ["/tmp:/foo"]}`)
+ req, err := http.NewRequest("POST", "/containers/"+id+"/start", bindCfg)
+ if err != nil {
+ c.Fatal(err)
+ }
+ req.Header.Set("Content-Type", "application/json")
+ // This is a cheat to make the http request do chunked encoding
+ // Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite
+ // https://golang.org/src/pkg/net/http/request.go?s=11980:12172
+ req.ContentLength = -1
+
+ resp, err := client.Do(req)
+ if err != nil {
+ c.Fatalf("error starting container with chunked encoding: %v", err)
+ }
+ resp.Body.Close()
+ if resp.StatusCode != 204 {
+ c.Fatalf("expected status code 204, got %d", resp.StatusCode)
+ }
+
+ out, err = inspectFieldJSON(id, "HostConfig.Binds")
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ var binds []string
+ if err := json.NewDecoder(strings.NewReader(out)).Decode(&binds); err != nil {
+ c.Fatal(err)
+ }
+ if len(binds) != 1 {
+ c.Fatalf("got unexpected binds: %v", binds)
+ }
+
+ expected := "/tmp:/foo"
+ if binds[0] != expected {
+ c.Fatalf("got incorrect bind spec, wanted %s, got: %s", expected, binds[0])
+ }
+}
+
+func (s *DockerSuite) TestPostContainerStop(c *check.C) {
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+ out, _, err := runCommandWithOutput(runCmd)
+ c.Assert(err, check.IsNil)
+
+ containerID := strings.TrimSpace(out)
+ c.Assert(waitRun(containerID), check.IsNil)
+
+ statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/stop", nil)
+
+ // 204 No Content is expected, not 200
+ c.Assert(statusCode, check.Equals, http.StatusNoContent)
+ c.Assert(err, check.IsNil)
+
+ if err := waitInspect(containerID, "{{ .State.Running }}", "false", 5); err != nil {
+ c.Fatal(err)
+ }
+}
diff --git a/integration-cli/docker_api_images_test.go b/integration-cli/docker_api_images_test.go
index e88fbaeaad..573a89046f 100644
--- a/integration-cli/docker_api_images_test.go
+++ b/integration-cli/docker_api_images_test.go
@@ -11,21 +11,6 @@ import (
"github.com/go-check/check"
)
-func (s *DockerSuite) TestLegacyImages(c *check.C) {
- status, body, err := sockRequest("GET", "/v1.6/images/json", nil)
- c.Assert(status, check.Equals, http.StatusOK)
- c.Assert(err, check.IsNil)
-
- images := []types.LegacyImage{}
- if err = json.Unmarshal(body, &images); err != nil {
- c.Fatalf("Error on unmarshal: %s", err)
- }
-
- if len(images) == 0 || images[0].Tag == "" || images[0].Repository == "" {
- c.Fatalf("Bad data: %q", images)
- }
-}
-
func (s *DockerSuite) TestApiImagesFilter(c *check.C) {
name := "utest:tag1"
name2 := "utest/docker:tag2"
@@ -35,7 +20,7 @@ func (s *DockerSuite) TestApiImagesFilter(c *check.C) {
c.Fatal(err, out)
}
}
- type image struct{ RepoTags []string }
+ type image types.Image
getImages := func(filter string) []image {
v := url.Values{}
v.Set("filter", filter)
@@ -98,3 +83,52 @@ func (s *DockerSuite) TestApiImagesSaveAndLoad(c *check.C) {
c.Fatal("load did not work properly")
}
}
+
+func (s *DockerSuite) TestApiImagesDelete(c *check.C) {
+ testRequires(c, Network)
+ name := "test-api-images-delete"
+ out, err := buildImage(name, "FROM hello-world\nENV FOO bar", false)
+ if err != nil {
+ c.Fatal(err)
+ }
+ defer deleteImages(name)
+ id := strings.TrimSpace(out)
+
+ if out, err := exec.Command(dockerBinary, "tag", name, "test:tag1").CombinedOutput(); err != nil {
+ c.Fatal(err, out)
+ }
+
+ status, _, err := sockRequest("DELETE", "/images/"+id, nil)
+ c.Assert(status, check.Equals, http.StatusConflict)
+ c.Assert(err, check.IsNil)
+
+ status, _, err = sockRequest("DELETE", "/images/test:noexist", nil)
+ c.Assert(status, check.Equals, http.StatusNotFound) //Status Codes:404 – no such image
+ c.Assert(err, check.IsNil)
+
+ status, _, err = sockRequest("DELETE", "/images/test:tag1", nil)
+ c.Assert(status, check.Equals, http.StatusOK)
+ c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestApiImagesHistory(c *check.C) {
+ testRequires(c, Network)
+ name := "test-api-images-history"
+ out, err := buildImage(name, "FROM hello-world\nENV FOO bar", false)
+ c.Assert(err, check.IsNil)
+
+ defer deleteImages(name)
+ id := strings.TrimSpace(out)
+
+ status, body, err := sockRequest("GET", "/images/"+id+"/history", nil)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, http.StatusOK)
+
+ var historydata []types.ImageHistory
+ if err = json.Unmarshal(body, &historydata); err != nil {
+ c.Fatalf("Error on unmarshal: %s", err)
+ }
+
+ c.Assert(len(historydata), check.Not(check.Equals), 0)
+ c.Assert(historydata[0].Tags[0], check.Equals, "test-api-images-history:latest")
+}
diff --git a/integration-cli/docker_api_inspect_test.go b/integration-cli/docker_api_inspect_test.go
index b90bdc7120..62f32357ce 100644
--- a/integration-cli/docker_api_inspect_test.go
+++ b/integration-cli/docker_api_inspect_test.go
@@ -18,40 +18,27 @@ func (s *DockerSuite) TestInspectApiContainerResponse(c *check.C) {
cleanedContainerID := strings.TrimSpace(out)
- // test on json marshal version
- // and latest version
- testVersions := []string{"v1.11", "latest"}
+ endpoint := "/containers/" + cleanedContainerID + "/json"
+ status, body, err := sockRequest("GET", endpoint, nil)
+ c.Assert(status, check.Equals, http.StatusOK)
+ c.Assert(err, check.IsNil)
- for _, testVersion := range testVersions {
- endpoint := "/containers/" + cleanedContainerID + "/json"
- if testVersion != "latest" {
- endpoint = "/" + testVersion + endpoint
- }
- status, body, err := sockRequest("GET", endpoint, nil)
- c.Assert(status, check.Equals, http.StatusOK)
- c.Assert(err, check.IsNil)
+ var inspectJSON map[string]interface{}
+ if err = json.Unmarshal(body, &inspectJSON); err != nil {
+ c.Fatalf("unable to unmarshal body for latest version: %v", err)
+ }
- var inspectJSON map[string]interface{}
- if err = json.Unmarshal(body, &inspectJSON); err != nil {
- c.Fatalf("unable to unmarshal body for %s version: %v", testVersion, err)
- }
+ keys := []string{"State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "ExecDriver", "MountLabel", "ProcessLabel", "Volumes", "VolumesRW"}
- keys := []string{"State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "ExecDriver", "MountLabel", "ProcessLabel", "Volumes", "VolumesRW"}
+ keys = append(keys, "Id")
- if testVersion == "v1.11" {
- keys = append(keys, "ID")
- } else {
- keys = append(keys, "Id")
- }
-
- for _, key := range keys {
- if _, ok := inspectJSON[key]; !ok {
- c.Fatalf("%s does not exist in response for %s version", key, testVersion)
- }
- }
- //Issue #6830: type not properly converted to JSON/back
- if _, ok := inspectJSON["Path"].(bool); ok {
- c.Fatalf("Path of `true` should not be converted to boolean `true` via JSON marshalling")
+ for _, key := range keys {
+ if _, ok := inspectJSON[key]; !ok {
+ c.Fatalf("%s does not exist in response for latest version", key)
}
}
+ //Issue #6830: type not properly converted to JSON/back
+ if _, ok := inspectJSON["Path"].(bool); ok {
+ c.Fatalf("Path of `true` should not be converted to boolean `true` via JSON marshalling")
+ }
}
diff --git a/integration-cli/docker_api_logs_test.go b/integration-cli/docker_api_logs_test.go
index f9284494d2..caef7255d7 100644
--- a/integration-cli/docker_api_logs_test.go
+++ b/integration-cli/docker_api_logs_test.go
@@ -60,3 +60,24 @@ func (s *DockerSuite) TestLogsApiNoStdoutNorStderr(c *check.C) {
c.Fatalf("Expected %s, got %s", expected, string(body[:]))
}
}
+
+// Regression test for #12704
+func (s *DockerSuite) TestLogsApiFollowEmptyOutput(c *check.C) {
+ name := "logs_test"
+ t0 := time.Now()
+ runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "--name", name, "busybox", "sleep", "10")
+ if out, _, err := runCommandWithOutput(runCmd); err != nil {
+ c.Fatal(out, err)
+ }
+
+ _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "")
+ t1 := time.Now()
+ body.Close()
+ if err != nil {
+ c.Fatal(err)
+ }
+ elapsed := t1.Sub(t0).Seconds()
+ if elapsed > 5.0 {
+ c.Fatalf("HTTP response was not immediate (elapsed %.1fs)", elapsed)
+ }
+}
diff --git a/integration-cli/docker_api_test.go b/integration-cli/docker_api_test.go
new file mode 100644
index 0000000000..0bd48880a8
--- /dev/null
+++ b/integration-cli/docker_api_test.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+ "net/http"
+ "net/http/httputil"
+ "time"
+
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestApiOptionsRoute(c *check.C) {
+ status, _, err := sockRequest("OPTIONS", "/", nil)
+ c.Assert(status, check.Equals, http.StatusOK)
+ c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestApiGetEnabledCors(c *check.C) {
+ res, body, err := sockRequestRaw("GET", "/version", nil, "")
+ body.Close()
+ c.Assert(err, check.IsNil)
+ c.Assert(res.StatusCode, check.Equals, http.StatusOK)
+	// TODO: @runcom incomplete tests — why did the old integration tests have these headers
+	// while none of the headers below appear in the response here?
+ //c.Log(res.Header)
+ //c.Assert(res.Header.Get("Access-Control-Allow-Origin"), check.Equals, "*")
+ //c.Assert(res.Header.Get("Access-Control-Allow-Headers"), check.Equals, "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
+}
+
+func (s *DockerSuite) TestVersionStatusCode(c *check.C) {
+ conn, err := sockConn(time.Duration(10 * time.Second))
+ c.Assert(err, check.IsNil)
+
+ client := httputil.NewClientConn(conn, nil)
+ defer client.Close()
+
+ req, err := http.NewRequest("GET", "/v999.0/version", nil)
+ c.Assert(err, check.IsNil)
+ req.Header.Set("User-Agent", "Docker-Client/999.0")
+
+ res, err := client.Do(req)
+ c.Assert(res.StatusCode, check.Equals, http.StatusBadRequest)
+}
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index b74dce2cfa..5e35d6e360 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -611,7 +611,7 @@ ONBUILD ENTRYPOINT ["echo"]`,
}
-func (s *DockerSuite) TestBuildCacheADD(c *check.C) {
+func (s *DockerSuite) TestBuildCacheAdd(c *check.C) {
name := "testbuildtwoimageswithadd"
server, err := fakeStorage(map[string]string{
"robots.txt": "hello",
@@ -1112,10 +1112,10 @@ func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) {
"dir/nested_dir/nest_nest_file": "2 times nested",
"dirt": "dirty",
})
- defer ctx.Close()
if err != nil {
c.Fatal(err)
}
+ defer ctx.Close()
id1, err := buildImageFromContext(name, ctx, true)
if err != nil {
@@ -1154,6 +1154,31 @@ func (s *DockerSuite) TestBuildCopyWildcardNoFind(c *check.C) {
}
+func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) {
+ name := "testcopywildcardinname"
+ defer deleteImages(name)
+ ctx, err := fakeContext(`FROM busybox
+ COPY *.txt /tmp/
+ RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ]
+ `, map[string]string{"*.txt": "hi there"})
+
+ if err != nil {
+ // Normally we would do c.Fatal(err) here but given that
+ // the odds of this failing are so rare, it must be because
+ // the OS we're running the client on doesn't support * in
+ // filenames (like windows). So, instead of failing the test
+ // just let it pass. Then we don't need to explicitly
+ // say which OSs this works on or not.
+ return
+ }
+ defer ctx.Close()
+
+ _, err = buildImageFromContext(name, ctx, true)
+ if err != nil {
+ c.Fatalf("should have built: %q", err)
+ }
+}
+
func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) {
name := "testcopywildcardcache"
ctx, err := fakeContext(`FROM busybox
@@ -2649,7 +2674,7 @@ func (s *DockerSuite) TestBuildConditionalCache(c *check.C) {
}
}
-func (s *DockerSuite) TestBuildADDLocalFileWithCache(c *check.C) {
+func (s *DockerSuite) TestBuildAddLocalFileWithCache(c *check.C) {
name := "testbuildaddlocalfilewithcache"
name2 := "testbuildaddlocalfilewithcache2"
dockerfile := `
@@ -2677,7 +2702,7 @@ func (s *DockerSuite) TestBuildADDLocalFileWithCache(c *check.C) {
}
}
-func (s *DockerSuite) TestBuildADDMultipleLocalFileWithCache(c *check.C) {
+func (s *DockerSuite) TestBuildAddMultipleLocalFileWithCache(c *check.C) {
name := "testbuildaddmultiplelocalfilewithcache"
name2 := "testbuildaddmultiplelocalfilewithcache2"
dockerfile := `
@@ -2705,7 +2730,7 @@ func (s *DockerSuite) TestBuildADDMultipleLocalFileWithCache(c *check.C) {
}
}
-func (s *DockerSuite) TestBuildADDLocalFileWithoutCache(c *check.C) {
+func (s *DockerSuite) TestBuildAddLocalFileWithoutCache(c *check.C) {
name := "testbuildaddlocalfilewithoutcache"
name2 := "testbuildaddlocalfilewithoutcache2"
dockerfile := `
@@ -2763,7 +2788,7 @@ func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) {
}
}
-func (s *DockerSuite) TestBuildADDCurrentDirWithCache(c *check.C) {
+func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) {
name := "testbuildaddcurrentdirwithcache"
name2 := name + "2"
name3 := name + "3"
@@ -2827,7 +2852,7 @@ func (s *DockerSuite) TestBuildADDCurrentDirWithCache(c *check.C) {
}
}
-func (s *DockerSuite) TestBuildADDCurrentDirWithoutCache(c *check.C) {
+func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) {
name := "testbuildaddcurrentdirwithoutcache"
name2 := "testbuildaddcurrentdirwithoutcache2"
dockerfile := `
@@ -2854,7 +2879,7 @@ func (s *DockerSuite) TestBuildADDCurrentDirWithoutCache(c *check.C) {
}
}
-func (s *DockerSuite) TestBuildADDRemoteFileWithCache(c *check.C) {
+func (s *DockerSuite) TestBuildAddRemoteFileWithCache(c *check.C) {
name := "testbuildaddremotefilewithcache"
server, err := fakeStorage(map[string]string{
"baz": "hello",
@@ -2885,7 +2910,7 @@ func (s *DockerSuite) TestBuildADDRemoteFileWithCache(c *check.C) {
}
}
-func (s *DockerSuite) TestBuildADDRemoteFileWithoutCache(c *check.C) {
+func (s *DockerSuite) TestBuildAddRemoteFileWithoutCache(c *check.C) {
name := "testbuildaddremotefilewithoutcache"
name2 := "testbuildaddremotefilewithoutcache2"
server, err := fakeStorage(map[string]string{
@@ -2917,7 +2942,7 @@ func (s *DockerSuite) TestBuildADDRemoteFileWithoutCache(c *check.C) {
}
}
-func (s *DockerSuite) TestBuildADDRemoteFileMTime(c *check.C) {
+func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) {
name := "testbuildaddremotefilemtime"
name2 := name + "2"
name3 := name + "3"
@@ -2988,7 +3013,7 @@ func (s *DockerSuite) TestBuildADDRemoteFileMTime(c *check.C) {
}
}
-func (s *DockerSuite) TestBuildADDLocalAndRemoteFilesWithCache(c *check.C) {
+func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithCache(c *check.C) {
name := "testbuildaddlocalandremotefilewithcache"
server, err := fakeStorage(map[string]string{
"baz": "hello",
@@ -3070,7 +3095,7 @@ func (s *DockerSuite) TestBuildNoContext(c *check.C) {
}
// TODO: TestCaching
-func (s *DockerSuite) TestBuildADDLocalAndRemoteFilesWithoutCache(c *check.C) {
+func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithoutCache(c *check.C) {
name := "testbuildaddlocalandremotefilewithoutcache"
name2 := "testbuildaddlocalandremotefilewithoutcache2"
server, err := fakeStorage(map[string]string{
@@ -3190,7 +3215,7 @@ func (s *DockerSuite) TestBuildForbiddenContextPath(c *check.C) {
}
-func (s *DockerSuite) TestBuildADDFileNotFound(c *check.C) {
+func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) {
name := "testbuildaddnotfound"
ctx, err := fakeContext(`FROM scratch
ADD foo /usr/local/bar`,
@@ -4118,6 +4143,35 @@ func (s *DockerSuite) TestBuildFromGIT(c *check.C) {
}
}
+func (s *DockerSuite) TestBuildFromGITWithContext(c *check.C) {
+ name := "testbuildfromgit"
+ defer deleteImages(name)
+ git, err := fakeGIT("repo", map[string]string{
+ "docker/Dockerfile": `FROM busybox
+ ADD first /first
+ RUN [ -f /first ]
+ MAINTAINER docker`,
+ "docker/first": "test git data",
+ }, true)
+ if err != nil {
+ c.Fatal(err)
+ }
+ defer git.Close()
+
+ u := fmt.Sprintf("%s#master:docker", git.RepoURL)
+ _, err = buildImageFromPath(name, u, true)
+ if err != nil {
+ c.Fatal(err)
+ }
+ res, err := inspectField(name, "Author")
+ if err != nil {
+ c.Fatal(err)
+ }
+ if res != "docker" {
+ c.Fatalf("Maintainer should be docker, got %s", res)
+ }
+}
+
func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) {
name := "testbuildcmdcleanuponentrypoint"
if _, err := buildImage(name,
@@ -4754,7 +4808,7 @@ func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) {
c.Fatalf("test5 was supposed to fail to find passwd")
}
- if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", strings.Replace(nonDockerfileFile, `\`, `\\`, -1)); !strings.Contains(out, expected) {
+ if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", nonDockerfileFile); !strings.Contains(out, expected) {
c.Fatalf("wrong error messsage:%v\nexpected to contain=%v", out, expected)
}
@@ -5293,3 +5347,82 @@ func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) {
}
}
+
+func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) {
+ testRequires(c, NativeExecDriver)
+ testRequires(c, SameHostDaemon)
+ defer deleteImages()
+
+ cgroupParent := "test"
+ data, err := ioutil.ReadFile("/proc/self/cgroup")
+ if err != nil {
+ c.Fatalf("failed to read '/proc/self/cgroup - %v", err)
+ }
+ selfCgroupPaths := parseCgroupPaths(string(data))
+ _, found := selfCgroupPaths["memory"]
+ if !found {
+ c.Fatalf("unable to find self cpu cgroup path. CgroupsPath: %v", selfCgroupPaths)
+ }
+ cmd := exec.Command(dockerBinary, "build", "--cgroup-parent", cgroupParent, "-")
+ cmd.Stdin = strings.NewReader(`
+FROM busybox
+RUN cat /proc/self/cgroup
+`)
+
+ out, _, err := runCommandWithOutput(cmd)
+ if err != nil {
+ c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
+ }
+}
+
+func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) {
+ // Check to make sure our build output prints the Dockerfile cmd
+	// properly - there was a bug that caused it to be duplicated on the
+ // Step X line
+ name := "testbuildnodupoutput"
+
+ _, out, err := buildImageWithOut(name, `
+ FROM busybox
+ RUN env`, false)
+ if err != nil {
+ c.Fatalf("Build should have worked: %q", err)
+ }
+
+ exp := "\nStep 1 : RUN env\n"
+ if !strings.Contains(out, exp) {
+ c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp)
+ }
+}
+
+func (s *DockerSuite) TestBuildBadCmdFlag(c *check.C) {
+ name := "testbuildbadcmdflag"
+
+ _, out, err := buildImageWithOut(name, `
+ FROM busybox
+ MAINTAINER --boo joe@example.com`, false)
+ if err == nil {
+ c.Fatal("Build should have failed")
+ }
+
+ exp := "\nUnknown flag: boo\n"
+ if !strings.Contains(out, exp) {
+ c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp)
+ }
+}
+
+func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) {
+ // Test to make sure the bad command is quoted with just "s and
+ // not as a Go []string
+ name := "testbuildbadrunerrmsg"
+ _, out, err := buildImageWithOut(name, `
+ FROM busybox
+ RUN badEXE a1 \& a2 a3`, false) // tab between a2 and a3
+ if err == nil {
+ c.Fatal("Should have failed to build")
+ }
+
+ exp := `The command '/bin/sh -c badEXE a1 \& a2 a3' returned a non-zero code: 127`
+ if !strings.Contains(out, exp) {
+ c.Fatalf("RUN doesn't have the correct output:\nGot:%s\nExpected:%s", out, exp)
+ }
+}
diff --git a/integration-cli/docker_cli_by_digest_test.go b/integration-cli/docker_cli_by_digest_test.go
index b9b319cf94..d4d4949955 100644
--- a/integration-cli/docker_cli_by_digest_test.go
+++ b/integration-cli/docker_cli_by_digest_test.go
@@ -115,6 +115,16 @@ func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) {
}
}
+func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) {
+ // pull from the registry using the @ reference
+ imageReference := fmt.Sprintf("%s@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", repoName)
+ cmd := exec.Command(dockerBinary, "pull", imageReference)
+ out, _, err := runCommandWithOutput(cmd)
+ if err == nil || !strings.Contains(out, "pulling with digest reference failed from v2 registry") {
+ c.Fatalf("expected non-zero exit status and correct error message when pulling non-existing image: %s", out)
+ }
+}
+
func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) {
pushDigest, err := setupImage()
if err != nil {
diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go
index 391cd4ebc5..ef157c63f8 100644
--- a/integration-cli/docker_cli_commit_test.go
+++ b/integration-cli/docker_cli_commit_test.go
@@ -85,16 +85,11 @@ func (s *DockerSuite) TestCommitPausedContainer(c *check.C) {
c.Fatalf("failed to commit container to image: %s, %v", out, err)
}
- cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Paused}}", cleanedContainerID)
- out, _, _, err = runCommandWithStdoutStderr(cmd)
- if err != nil {
- c.Fatalf("failed to inspect container: %v, output: %q", err, out)
- }
-
+ out, err = inspectField(cleanedContainerID, "State.Paused")
+ c.Assert(err, check.IsNil)
if !strings.Contains(out, "true") {
c.Fatalf("commit should not unpause a paused container")
}
-
}
func (s *DockerSuite) TestCommitNewFile(c *check.C) {
@@ -242,9 +237,7 @@ func (s *DockerSuite) TestCommitChange(c *check.C) {
for conf, value := range expected {
res, err := inspectField(imageId, conf)
- if err != nil {
- c.Errorf("failed to get value %s, error: %s", conf, err)
- }
+ c.Assert(err, check.IsNil)
if res != value {
c.Errorf("%s('%s'), expected %s", conf, res, value)
}
diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go
index 26e778e4f2..d79615f241 100644
--- a/integration-cli/docker_cli_cp_test.go
+++ b/integration-cli/docker_cli_cp_test.go
@@ -435,7 +435,6 @@ func (s *DockerSuite) TestCpVolumePath(c *check.C) {
}
cleanedContainerID := strings.TrimSpace(out)
- defer dockerCmd(c, "rm", "-fv", cleanedContainerID)
out, _ = dockerCmd(c, "wait", cleanedContainerID)
if strings.TrimSpace(out) != "0" {
diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go
index 646a8eafe0..019ea97fc6 100644
--- a/integration-cli/docker_cli_create_test.go
+++ b/integration-cli/docker_cli_create_test.go
@@ -2,6 +2,7 @@ package main
import (
"encoding/json"
+ "fmt"
"os"
"os/exec"
"reflect"
@@ -290,3 +291,53 @@ func (s *DockerSuite) TestCreateHostnameWithNumber(c *check.C) {
c.Fatalf("hostname not set, expected `web.0`, got: %s", out)
}
}
+
+func (s *DockerSuite) TestCreateRM(c *check.C) {
+ // Test to make sure we can 'rm' a new container that is in
+	// "Created" state, and has never been run. Test "rm -f" too.
+
+ // create a container
+ createCmd := exec.Command(dockerBinary, "create", "busybox")
+ out, _, err := runCommandWithOutput(createCmd)
+ if err != nil {
+ c.Fatalf("Failed to create container:%s\n%s", out, err)
+ }
+ cID := strings.TrimSpace(out)
+
+ rmCmd := exec.Command(dockerBinary, "rm", cID)
+ out, _, err = runCommandWithOutput(rmCmd)
+ if err != nil {
+ c.Fatalf("Failed to rm container:%s\n%s", out, err)
+ }
+
+ // Now do it again so we can "rm -f" this time
+ createCmd = exec.Command(dockerBinary, "create", "busybox")
+ out, _, err = runCommandWithOutput(createCmd)
+ if err != nil {
+ c.Fatalf("Failed to create 2nd container:%s\n%s", out, err)
+ }
+
+ cID = strings.TrimSpace(out)
+ rmCmd = exec.Command(dockerBinary, "rm", "-f", cID)
+ out, _, err = runCommandWithOutput(rmCmd)
+ if err != nil {
+ c.Fatalf("Failed to rm -f container:%s\n%s", out, err)
+ }
+}
+
+func (s *DockerSuite) TestCreateModeIpcContainer(c *check.C) {
+ testRequires(c, SameHostDaemon)
+
+ cmd := exec.Command(dockerBinary, "create", "busybox")
+ out, _, err := runCommandWithOutput(cmd)
+ if err != nil {
+ c.Fatal(err, out)
+ }
+ id := strings.TrimSpace(out)
+
+ cmd = exec.Command(dockerBinary, "create", fmt.Sprintf("--ipc=container:%s", id), "busybox")
+ out, _, err = runCommandWithOutput(cmd)
+ if err != nil {
+ c.Fatalf("Create container with ipc mode container should success with non running container: %s\n%s", out, err)
+ }
+}
diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go
index e099995ad3..d668781dcf 100644
--- a/integration-cli/docker_cli_daemon_test.go
+++ b/integration-cli/docker_cli_daemon_test.go
@@ -6,12 +6,16 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
+ "net"
"os"
"os/exec"
"path/filepath"
+ "regexp"
+ "strconv"
"strings"
"time"
+ "github.com/docker/libnetwork/iptables"
"github.com/docker/libtrust"
"github.com/go-check/check"
)
@@ -230,7 +234,7 @@ func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) {
}
content, _ := ioutil.ReadFile(s.d.logFile.Name())
if !strings.Contains(string(content), `level=debug`) {
- c.Fatalf(`Missing level="debug" in log file using -D:\n%s`, string(content))
+ c.Fatalf(`Should have level="debug" in log file using -D:\n%s`, string(content))
}
}
@@ -240,7 +244,7 @@ func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) {
}
content, _ := ioutil.ReadFile(s.d.logFile.Name())
if !strings.Contains(string(content), `level=debug`) {
- c.Fatalf(`Missing level="debug" in log file using --debug:\n%s`, string(content))
+ c.Fatalf(`Should have level="debug" in log file using --debug:\n%s`, string(content))
}
}
@@ -250,7 +254,7 @@ func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) {
}
content, _ := ioutil.ReadFile(s.d.logFile.Name())
if !strings.Contains(string(content), `level=debug`) {
- c.Fatalf(`Missing level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content))
+ c.Fatalf(`Should have level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content))
}
}
@@ -280,35 +284,6 @@ func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) {
}
}
-// #9629
-func (s *DockerDaemonSuite) TestDaemonVolumesBindsRefs(c *check.C) {
- if err := s.d.StartWithBusybox(); err != nil {
- c.Fatal(err)
- }
-
- tmp, err := ioutil.TempDir(os.TempDir(), "")
- if err != nil {
- c.Fatal(err)
- }
- defer os.RemoveAll(tmp)
-
- if err := ioutil.WriteFile(tmp+"/test", []byte("testing"), 0655); err != nil {
- c.Fatal(err)
- }
-
- if out, err := s.d.Cmd("create", "-v", tmp+":/foo", "--name=voltest", "busybox"); err != nil {
- c.Fatal(err, out)
- }
-
- if err := s.d.Restart(); err != nil {
- c.Fatal(err)
- }
-
- if out, err := s.d.Cmd("run", "--volumes-from=voltest", "--name=consumer", "busybox", "/bin/sh", "-c", "[ -f /foo/test ]"); err != nil {
- c.Fatal(err, out)
- }
-}
-
func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) {
// TODO: skip or update for Windows daemon
os.Remove("/etc/docker/key.json")
@@ -356,76 +331,6 @@ func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) {
}
}
-// Simulate an older daemon (pre 1.3) coming up with volumes specified in containers
-// without corresponding volume json
-func (s *DockerDaemonSuite) TestDaemonUpgradeWithVolumes(c *check.C) {
- graphDir := filepath.Join(os.TempDir(), "docker-test")
- defer os.RemoveAll(graphDir)
- if err := s.d.StartWithBusybox("-g", graphDir); err != nil {
- c.Fatal(err)
- }
-
- tmpDir := filepath.Join(os.TempDir(), "test")
- defer os.RemoveAll(tmpDir)
-
- if out, err := s.d.Cmd("create", "-v", tmpDir+":/foo", "--name=test", "busybox"); err != nil {
- c.Fatal(err, out)
- }
-
- if err := s.d.Stop(); err != nil {
- c.Fatal(err)
- }
-
- // Remove this since we're expecting the daemon to re-create it too
- if err := os.RemoveAll(tmpDir); err != nil {
- c.Fatal(err)
- }
-
- configDir := filepath.Join(graphDir, "volumes")
-
- if err := os.RemoveAll(configDir); err != nil {
- c.Fatal(err)
- }
-
- if err := s.d.Start("-g", graphDir); err != nil {
- c.Fatal(err)
- }
-
- if _, err := os.Stat(tmpDir); os.IsNotExist(err) {
- c.Fatalf("expected volume path %s to exist but it does not", tmpDir)
- }
-
- dir, err := ioutil.ReadDir(configDir)
- if err != nil {
- c.Fatal(err)
- }
- if len(dir) == 0 {
- c.Fatalf("expected volumes config dir to contain data for new volume")
- }
-
- // Now with just removing the volume config and not the volume data
- if err := s.d.Stop(); err != nil {
- c.Fatal(err)
- }
-
- if err := os.RemoveAll(configDir); err != nil {
- c.Fatal(err)
- }
-
- if err := s.d.Start("-g", graphDir); err != nil {
- c.Fatal(err)
- }
-
- dir, err = ioutil.ReadDir(configDir)
- if err != nil {
- c.Fatal(err)
- }
-
- if len(dir) == 0 {
- c.Fatalf("expected volumes config dir to contain data for new volume")
- }
-}
-
// GH#11320 - verify that the daemon exits on failure properly
// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means
// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required
@@ -447,6 +352,320 @@ func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) {
}
}
+func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) {
+ d := s.d
+ err := d.Start("--bridge", "nosuchbridge")
+ c.Assert(err, check.NotNil, check.Commentf("--bridge option with an invalid bridge should cause the daemon to fail"))
+ defer d.Restart()
+
+ bridgeName := "external-bridge"
+ bridgeIp := "192.169.1.1/24"
+ _, bridgeIPNet, _ := net.ParseCIDR(bridgeIp)
+
+ out, err := createInterface(c, "bridge", bridgeName, bridgeIp)
+ c.Assert(err, check.IsNil, check.Commentf(out))
+ defer deleteInterface(c, bridgeName)
+
+ err = d.StartWithBusybox("--bridge", bridgeName)
+ c.Assert(err, check.IsNil)
+
+ ipTablesSearchString := bridgeIPNet.String()
+ ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
+ out, _, err = runCommandWithOutput(ipTablesCmd)
+ c.Assert(err, check.IsNil)
+
+ c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true,
+ check.Commentf("iptables output should have contained %q, but was %q",
+ ipTablesSearchString, out))
+
+ _, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top")
+ c.Assert(err, check.IsNil)
+
+ containerIp := d.findContainerIP("ExtContainer")
+ ip := net.ParseIP(containerIp)
+ c.Assert(bridgeIPNet.Contains(ip), check.Equals, true,
+ check.Commentf("Container IP-Address must be in the same subnet range : %s",
+ containerIp))
+}
+
+func createInterface(c *check.C, ifType string, ifName string, ipNet string) (string, error) {
+ args := []string{"link", "add", "name", ifName, "type", ifType}
+ ipLinkCmd := exec.Command("ip", args...)
+ out, _, err := runCommandWithOutput(ipLinkCmd)
+ if err != nil {
+ return out, err
+ }
+
+ ifCfgCmd := exec.Command("ifconfig", ifName, ipNet, "up")
+ out, _, err = runCommandWithOutput(ifCfgCmd)
+ return out, err
+}
+
+func deleteInterface(c *check.C, ifName string) {
+ ifCmd := exec.Command("ip", "link", "delete", ifName)
+ out, _, err := runCommandWithOutput(ifCmd)
+ c.Assert(err, check.IsNil, check.Commentf(out))
+
+ flushCmd := exec.Command("iptables", "-t", "nat", "--flush")
+ out, _, err = runCommandWithOutput(flushCmd)
+ c.Assert(err, check.IsNil, check.Commentf(out))
+
+ flushCmd = exec.Command("iptables", "--flush")
+ out, _, err = runCommandWithOutput(flushCmd)
+ c.Assert(err, check.IsNil, check.Commentf(out))
+}
+
+func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) {
+ // TestDaemonBridgeIP Steps
+ // 1. Delete the existing docker0 Bridge
+ // 2. Set --bip daemon configuration and start the new Docker Daemon
+ // 3. Check if the bip config has taken effect using ifconfig and iptables commands
+ // 4. Launch a Container and make sure the IP-Address is in the expected subnet
+ // 5. Delete the docker0 Bridge
+	// 6. Restart the Docker Daemon (via deferred action)
+ // This Restart takes care of bringing docker0 interface back to auto-assigned IP
+
+ defaultNetworkBridge := "docker0"
+ deleteInterface(c, defaultNetworkBridge)
+
+ d := s.d
+
+ bridgeIp := "192.169.1.1/24"
+ ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIp)
+
+ err := d.StartWithBusybox("--bip", bridgeIp)
+ c.Assert(err, check.IsNil)
+ defer d.Restart()
+
+ ifconfigSearchString := ip.String()
+ ifconfigCmd := exec.Command("ifconfig", defaultNetworkBridge)
+ out, _, _, err := runCommandWithStdoutStderr(ifconfigCmd)
+ c.Assert(err, check.IsNil)
+
+ c.Assert(strings.Contains(out, ifconfigSearchString), check.Equals, true,
+ check.Commentf("ifconfig output should have contained %q, but was %q",
+ ifconfigSearchString, out))
+
+ ipTablesSearchString := bridgeIPNet.String()
+ ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
+ out, _, err = runCommandWithOutput(ipTablesCmd)
+ c.Assert(err, check.IsNil)
+
+ c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true,
+ check.Commentf("iptables output should have contained %q, but was %q",
+ ipTablesSearchString, out))
+
+ out, err = d.Cmd("run", "-d", "--name", "test", "busybox", "top")
+ c.Assert(err, check.IsNil)
+
+ containerIp := d.findContainerIP("test")
+ ip = net.ParseIP(containerIp)
+ c.Assert(bridgeIPNet.Contains(ip), check.Equals, true,
+ check.Commentf("Container IP-Address must be in the same subnet range : %s",
+ containerIp))
+ deleteInterface(c, defaultNetworkBridge)
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *check.C) {
+ if err := s.d.Start(); err != nil {
+ c.Fatalf("Could not start daemon: %v", err)
+ }
+ defer s.d.Restart()
+ if err := s.d.Stop(); err != nil {
+ c.Fatalf("Could not stop daemon: %v", err)
+ }
+
+ // now we will change the docker0's IP and then try starting the daemon
+ bridgeIP := "192.169.100.1/24"
+ _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP)
+
+ ipCmd := exec.Command("ifconfig", "docker0", bridgeIP)
+ stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd)
+ if err != nil {
+ c.Fatalf("failed to change docker0's IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr)
+ }
+
+ if err := s.d.Start("--bip", bridgeIP); err != nil {
+ c.Fatalf("Could not start daemon: %v", err)
+ }
+
+ //check if the iptables contains new bridgeIP MASQUERADE rule
+ ipTablesSearchString := bridgeIPNet.String()
+ ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
+ out, _, err := runCommandWithOutput(ipTablesCmd)
+ if err != nil {
+ c.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
+ }
+ if !strings.Contains(out, ipTablesSearchString) {
+ c.Fatalf("iptables output should have contained new MASQUERADE rule with IP %q, but was %q", ipTablesSearchString, out)
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) {
+ d := s.d
+
+ bridgeName := "external-bridge"
+ bridgeIp := "192.169.1.1/24"
+
+ out, err := createInterface(c, "bridge", bridgeName, bridgeIp)
+ c.Assert(err, check.IsNil, check.Commentf(out))
+ defer deleteInterface(c, bridgeName)
+
+ args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"}
+ err = d.StartWithBusybox(args...)
+ c.Assert(err, check.IsNil)
+ defer d.Restart()
+
+ for i := 0; i < 4; i++ {
+ cName := "Container" + strconv.Itoa(i)
+ out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top")
+ if err != nil {
+ c.Assert(strings.Contains(out, "no available ip addresses"), check.Equals, true,
+ check.Commentf("Could not run a Container : %s %s", err.Error(), out))
+ }
+ }
+}
+
+func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) {
+ d := s.d
+
+ ipStr := "192.170.1.1/24"
+ ip, _, _ := net.ParseCIDR(ipStr)
+ args := []string{"--ip", ip.String()}
+ err := d.StartWithBusybox(args...)
+ c.Assert(err, check.IsNil)
+ defer d.Restart()
+
+ out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top")
+ c.Assert(err, check.NotNil,
+ check.Commentf("Running a container must fail with an invalid --ip option"))
+ c.Assert(strings.Contains(out, "Error starting userland proxy"), check.Equals, true)
+
+ ifName := "dummy"
+ out, err = createInterface(c, "dummy", ifName, ipStr)
+ c.Assert(err, check.IsNil, check.Commentf(out))
+ defer deleteInterface(c, ifName)
+
+ _, err = d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top")
+ c.Assert(err, check.IsNil)
+
+ ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
+ out, _, err = runCommandWithOutput(ipTablesCmd)
+ c.Assert(err, check.IsNil)
+
+ regex := fmt.Sprintf("DNAT.*%s.*dpt:8000", ip.String())
+ matched, _ := regexp.MatchString(regex, out)
+ c.Assert(matched, check.Equals, true,
+ check.Commentf("iptables output should have contained %q, but was %q", regex, out))
+}
+
+func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) {
+ d := s.d
+
+ bridgeName := "external-bridge"
+ bridgeIp := "192.169.1.1/24"
+
+ out, err := createInterface(c, "bridge", bridgeName, bridgeIp)
+ c.Assert(err, check.IsNil, check.Commentf(out))
+ defer deleteInterface(c, bridgeName)
+
+ args := []string{"--bridge", bridgeName, "--icc=false"}
+ err = d.StartWithBusybox(args...)
+ c.Assert(err, check.IsNil)
+ defer d.Restart()
+
+ ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD")
+ out, _, err = runCommandWithOutput(ipTablesCmd)
+ c.Assert(err, check.IsNil)
+
+ regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName)
+ matched, _ := regexp.MatchString(regex, out)
+ c.Assert(matched, check.Equals, true,
+ check.Commentf("iptables output should have contained %q, but was %q", regex, out))
+
+ // Pinging another container must fail with --icc=false
+ pingContainers(c, d, true)
+
+ ipStr := "192.171.1.1/24"
+ ip, _, _ := net.ParseCIDR(ipStr)
+ ifName := "icc-dummy"
+
+ createInterface(c, "dummy", ifName, ipStr)
+
+ // But, Pinging external or a Host interface must succeed
+ pingCmd := fmt.Sprintf("ping -c 1 %s -W 1", ip.String())
+ runArgs := []string{"--rm", "busybox", "sh", "-c", pingCmd}
+ _, err = d.Cmd("run", runArgs...)
+ c.Assert(err, check.IsNil)
+}
+
+func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *check.C) {
+ d := s.d
+
+ bridgeName := "external-bridge"
+ bridgeIp := "192.169.1.1/24"
+
+ out, err := createInterface(c, "bridge", bridgeName, bridgeIp)
+ c.Assert(err, check.IsNil, check.Commentf(out))
+ defer deleteInterface(c, bridgeName)
+
+ args := []string{"--bridge", bridgeName, "--icc=false"}
+ err = d.StartWithBusybox(args...)
+ c.Assert(err, check.IsNil)
+ defer d.Restart()
+
+ ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD")
+ out, _, err = runCommandWithOutput(ipTablesCmd)
+ c.Assert(err, check.IsNil)
+
+ regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName)
+ matched, _ := regexp.MatchString(regex, out)
+ c.Assert(matched, check.Equals, true,
+ check.Commentf("iptables output should have contained %q, but was %q", regex, out))
+
+ out, err = d.Cmd("run", "-d", "--expose", "4567", "--name", "icc1", "busybox", "nc", "-l", "-p", "4567")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+
+ out, err = d.Cmd("run", "--link", "icc1:icc1", "busybox", "nc", "icc1", "4567")
+ c.Assert(err, check.IsNil, check.Commentf(out))
+}
+
+func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) {
+ bridgeName := "external-bridge"
+ bridgeIp := "192.169.1.1/24"
+
+ out, err := createInterface(c, "bridge", bridgeName, bridgeIp)
+ c.Assert(err, check.IsNil, check.Commentf(out))
+ defer deleteInterface(c, bridgeName)
+
+ args := []string{"--bridge", bridgeName, "--icc=false"}
+ err = s.d.StartWithBusybox(args...)
+ c.Assert(err, check.IsNil)
+ defer s.d.Restart()
+
+ _, err = s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top")
+ c.Assert(err, check.IsNil)
+ _, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top")
+ c.Assert(err, check.IsNil)
+
+ childIP := s.d.findContainerIP("child")
+ parentIP := s.d.findContainerIP("parent")
+
+ sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"}
+ destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"}
+ if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) {
+ c.Fatal("Iptables rules not found")
+ }
+
+ s.d.Cmd("rm", "--link", "parent/http")
+ if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) {
+ c.Fatal("Iptables rules should be removed when unlink")
+ }
+
+ s.d.Cmd("kill", "child")
+ s.d.Cmd("kill", "parent")
+}
+
func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) {
testRequires(c, NativeExecDriver)
@@ -661,8 +880,8 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) {
if err == nil {
c.Fatalf("Logs should fail with \"none\" driver")
}
- if !strings.Contains(out, `\"logs\" command is supported only for \"json-file\" logging driver`) {
- c.Fatalf("There should be error about non-json-file driver, got %s", out)
+ if !strings.Contains(out, `"logs" command is supported only for "json-file" logging driver`) {
+ c.Fatalf("There should be error about non-json-file driver, got: %s", out)
}
}
@@ -734,7 +953,7 @@ func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) {
}
}
-func (s *DockerDaemonSuite) TestDaemonwithwrongkey(c *check.C) {
+func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) {
type Config struct {
Crv string `json:"crv"`
D string `json:"d"`
@@ -889,3 +1108,60 @@ func (s *DockerDaemonSuite) TestHttpsInfoRogueServerCert(c *check.C) {
c.Fatalf("Expected err: %s, got instead: %s and output: %s", errCaUnknown, err, out)
}
}
+
+func pingContainers(c *check.C, d *Daemon, expectFailure bool) {
+ var dargs []string
+ if d != nil {
+ dargs = []string{"--host", d.sock()}
+ }
+
+ args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top")
+ _, err := runCommand(exec.Command(dockerBinary, args...))
+ c.Assert(err, check.IsNil)
+
+ args = append(dargs, "run", "--rm", "--link", "container1:alias1", "busybox", "sh", "-c")
+ pingCmd := "ping -c 1 %s -W 1"
+ args = append(args, fmt.Sprintf(pingCmd, "alias1"))
+ _, err = runCommand(exec.Command(dockerBinary, args...))
+
+ if expectFailure {
+ c.Assert(err, check.NotNil)
+ } else {
+ c.Assert(err, check.IsNil)
+ }
+
+ args = append(dargs, "rm", "-f", "container1")
+ runCommand(exec.Command(dockerBinary, args...))
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) {
+ c.Assert(s.d.StartWithBusybox(), check.IsNil)
+
+ socket := filepath.Join(s.d.folder, "docker.sock")
+
+ out, err := s.d.Cmd("run", "-d", "-v", socket+":/sock", "busybox")
+ c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+ c.Assert(s.d.Restart(), check.IsNil)
+}
+
+func (s *DockerDaemonSuite) TestCleanupMountsAfterCrash(c *check.C) {
+ c.Assert(s.d.StartWithBusybox(), check.IsNil)
+
+ out, err := s.d.Cmd("run", "-d", "busybox", "top")
+ c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+ id := strings.TrimSpace(out)
+ c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil)
+ c.Assert(s.d.Start(), check.IsNil)
+ mountOut, err := exec.Command("mount").CombinedOutput()
+ c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
+ c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, check.Commentf("Something mounted from older daemon start: %s", mountOut))
+}
+
+func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) {
+ c.Assert(s.d.StartWithBusybox("-b", "none"), check.IsNil)
+
+ out, err := s.d.Cmd("run", "--rm", "busybox", "ip", "l")
+ c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+ c.Assert(strings.Contains(out, "eth0"), check.Equals, false,
+ check.Commentf("There shouldn't be eth0 in container when network is disabled: %s", out))
+}
diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go
index 332b128ed8..725b762864 100644
--- a/integration-cli/docker_cli_diff_test.go
+++ b/integration-cli/docker_cli_diff_test.go
@@ -40,12 +40,14 @@ func (s *DockerSuite) TestDiffFilenameShownInOutput(c *check.C) {
func (s *DockerSuite) TestDiffEnsureDockerinitFilesAreIgnored(c *check.C) {
// this is a list of files which shouldn't show up in `docker diff`
dockerinitFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerinit", "/.dockerenv"}
+ containerCount := 5
// we might not run into this problem from the first run, so start a few containers
- for i := 0; i < 20; i++ {
+ for i := 0; i < containerCount; i++ {
containerCmd := `echo foo > /root/bar`
runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd)
out, _, err := runCommandWithOutput(runCmd)
+
if err != nil {
c.Fatal(out, err)
}
diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go
index 80cc0c69d5..d6518ce8d4 100644
--- a/integration-cli/docker_cli_events_test.go
+++ b/integration-cli/docker_cli_events_test.go
@@ -13,6 +13,41 @@ import (
"github.com/go-check/check"
)
+func (s *DockerSuite) TestEventsTimestampFormats(c *check.C) {
+ image := "busybox"
+
+ // Start stopwatch, generate an event
+	time.Sleep(time.Second) // so that we don't grab events from previous test occurred in the same second
+ start := daemonTime(c)
+ time.Sleep(time.Second) // remote API precision is only a second, wait a while before creating an event
+ dockerCmd(c, "tag", image, "timestamptest:1")
+ dockerCmd(c, "rmi", "timestamptest:1")
+ time.Sleep(time.Second) // so that until > since
+ end := daemonTime(c)
+
+ // List of available time formats to --since
+ unixTs := func(t time.Time) string { return fmt.Sprintf("%v", t.Unix()) }
+ rfc3339 := func(t time.Time) string { return t.Format(time.RFC3339) }
+
+ // --since=$start must contain only the 'untag' event
+ for _, f := range []func(time.Time) string{unixTs, rfc3339} {
+ since, until := f(start), f(end)
+ cmd := exec.Command(dockerBinary, "events", "--since="+since, "--until="+until)
+ out, _, err := runCommandWithOutput(cmd)
+ if err != nil {
+ c.Fatalf("docker events cmd failed: %v\nout=%s", err, out)
+ }
+ events := strings.Split(strings.TrimSpace(out), "\n")
+ if len(events) != 2 {
+ c.Fatalf("unexpected events, was expecting only 2 events tag/untag (since=%s, until=%s) out=%s", since, until, out)
+ }
+ if !strings.Contains(out, "untag") {
+ c.Fatalf("expected 'untag' event not found (since=%s, until=%s) out=%s", since, until, out)
+ }
+ }
+
+}
+
func (s *DockerSuite) TestEventsUntag(c *check.C) {
image := "busybox"
dockerCmd(c, "tag", image, "utest:tag1")
@@ -195,6 +230,31 @@ func (s *DockerSuite) TestEventsImageUntagDelete(c *check.C) {
}
}
+func (s *DockerSuite) TestEventsImageTag(c *check.C) {
+ time.Sleep(time.Second * 2) // because API has seconds granularity
+ since := daemonTime(c).Unix()
+ image := "testimageevents:tag"
+ dockerCmd(c, "tag", "busybox", image)
+
+ eventsCmd := exec.Command(dockerBinary, "events",
+ fmt.Sprintf("--since=%d", since),
+ fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
+ out, _, err := runCommandWithOutput(eventsCmd)
+ c.Assert(err, check.IsNil)
+
+ events := strings.Split(strings.TrimSpace(out), "\n")
+ if len(events) != 1 {
+ c.Fatalf("was expecting 1 event. out=%s", out)
+ }
+ event := strings.TrimSpace(events[0])
+ expectedStr := image + ": tag"
+
+ if !strings.HasSuffix(event, expectedStr) {
+ c.Fatalf("wrong event format. expected='%s' got=%s", expectedStr, event)
+ }
+
+}
+
func (s *DockerSuite) TestEventsImagePull(c *check.C) {
since := daemonTime(c).Unix()
testRequires(c, Network)
diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go
index 4b36d7b532..9abafb8fa9 100644
--- a/integration-cli/docker_cli_exec_test.go
+++ b/integration-cli/docker_cli_exec_test.go
@@ -634,28 +634,3 @@ func (s *DockerSuite) TestExecWithUser(c *check.C) {
}
}
-
-func (s *DockerSuite) TestExecWithPrivileged(c *check.C) {
-
- runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "parent", "--cap-drop=ALL", "busybox", "top")
- if out, _, err := runCommandWithOutput(runCmd); err != nil {
- c.Fatal(out, err)
- }
-
- cmd := exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sda b 8 0")
- out, _, err := runCommandWithOutput(cmd)
- if err == nil || !strings.Contains(out, "Operation not permitted") {
- c.Fatalf("exec mknod in --cap-drop=ALL container without --privileged should failed")
- }
-
- cmd = exec.Command(dockerBinary, "exec", "--privileged", "parent", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
- out, _, err = runCommandWithOutput(cmd)
- if err != nil {
- c.Fatal(err, out)
- }
-
- if actual := strings.TrimSpace(out); actual != "ok" {
- c.Fatalf("exec mknod in --cap-drop=ALL container with --privileged failed: %v, output: %q", err, out)
- }
-
-}
diff --git a/integration-cli/docker_cli_experimental_test.go b/integration-cli/docker_cli_experimental_test.go
new file mode 100644
index 0000000000..4cf05c91f9
--- /dev/null
+++ b/integration-cli/docker_cli_experimental_test.go
@@ -0,0 +1,24 @@
+// +build experimental
+
+package main
+
+import (
+ "os/exec"
+ "strings"
+
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestExperimentalVersion(c *check.C) {
+ versionCmd := exec.Command(dockerBinary, "version")
+ out, _, err := runCommandWithOutput(versionCmd)
+ if err != nil {
+ c.Fatalf("failed to execute docker version: %s, %v", out, err)
+ }
+
+ for _, line := range strings.Split(out, "\n") {
+ if strings.HasPrefix(line, "Client version:") || strings.HasPrefix(line, "Server version:") {
+ c.Assert(line, check.Matches, "*-experimental")
+ }
+ }
+}
diff --git a/integration-cli/docker_cli_export_import_test.go b/integration-cli/docker_cli_export_import_test.go
index 3370a96761..22772704ed 100644
--- a/integration-cli/docker_cli_export_import_test.go
+++ b/integration-cli/docker_cli_export_import_test.go
@@ -12,18 +12,12 @@ import (
func (s *DockerSuite) TestExportContainerAndImportImage(c *check.C) {
containerID := "testexportcontainerandimportimage"
- runCmd := exec.Command(dockerBinary, "run", "-d", "--name", containerID, "busybox", "true")
+ runCmd := exec.Command(dockerBinary, "run", "--name", containerID, "busybox", "true")
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
c.Fatal("failed to create a container", out, err)
}
- inspectCmd := exec.Command(dockerBinary, "inspect", containerID)
- out, _, err = runCommandWithOutput(inspectCmd)
- if err != nil {
- c.Fatalf("output should've been a container id: %s %s ", containerID, err)
- }
-
exportCmd := exec.Command(dockerBinary, "export", containerID)
if out, _, err = runCommandWithOutput(exportCmd); err != nil {
c.Fatalf("failed to export container: %s, %v", out, err)
@@ -37,30 +31,21 @@ func (s *DockerSuite) TestExportContainerAndImportImage(c *check.C) {
}
cleanedImageID := strings.TrimSpace(out)
-
- inspectCmd = exec.Command(dockerBinary, "inspect", cleanedImageID)
- if out, _, err = runCommandWithOutput(inspectCmd); err != nil {
- c.Fatalf("output should've been an image id: %s, %v", out, err)
+ if cleanedImageID == "" {
+ c.Fatalf("output should have been an image id, got: %s", out)
}
-
}
// Used to test output flag in the export command
func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) {
containerID := "testexportcontainerwithoutputandimportimage"
- runCmd := exec.Command(dockerBinary, "run", "-d", "--name", containerID, "busybox", "true")
+ runCmd := exec.Command(dockerBinary, "run", "--name", containerID, "busybox", "true")
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
c.Fatal("failed to create a container", out, err)
}
- inspectCmd := exec.Command(dockerBinary, "inspect", containerID)
- out, _, err = runCommandWithOutput(inspectCmd)
- if err != nil {
- c.Fatalf("output should've been a container id: %s %s ", containerID, err)
- }
-
defer os.Remove("testexp.tar")
exportCmd := exec.Command(dockerBinary, "export", "--output=testexp.tar", containerID)
@@ -81,10 +66,7 @@ func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) {
}
cleanedImageID := strings.TrimSpace(out)
-
- inspectCmd = exec.Command(dockerBinary, "inspect", cleanedImageID)
- if out, _, err = runCommandWithOutput(inspectCmd); err != nil {
- c.Fatalf("output should've been an image id: %s, %v", out, err)
+ if cleanedImageID == "" {
+ c.Fatalf("output should have been an image id, got: %s", out)
}
-
}
diff --git a/integration-cli/docker_cli_help_test.go b/integration-cli/docker_cli_help_test.go
index d6903e4fb9..86b0b3bbc7 100644
--- a/integration-cli/docker_cli_help_test.go
+++ b/integration-cli/docker_cli_help_test.go
@@ -152,3 +152,32 @@ func (s *DockerSuite) TestHelpTextVerify(c *check.C) {
}
}
+
+func (s *DockerSuite) TestHelpErrorStderr(c *check.C) {
+ // If we had a generic CLI test file this one should go in there
+
+ cmd := exec.Command(dockerBinary, "boogie")
+ out, ec, err := runCommandWithOutput(cmd)
+ if err == nil || ec == 0 {
+ c.Fatalf("Boogie command should have failed")
+ }
+
+ expected := "docker: 'boogie' is not a docker command. See 'docker --help'.\n"
+ if out != expected {
+ c.Fatalf("Bad output from boogie\nGot:%s\nExpected:%s", out, expected)
+ }
+
+ cmd = exec.Command(dockerBinary, "rename", "foo", "bar")
+ out, ec, err = runCommandWithOutput(cmd)
+ if err == nil || ec == 0 {
+ c.Fatalf("Rename should have failed")
+ }
+
+ expected = `Error response from daemon: no such id: foo
+Error: failed to rename container named foo
+`
+ if out != expected {
+ c.Fatalf("Bad output from rename\nGot:%s\nExpected:%s", out, expected)
+ }
+
+}
diff --git a/integration-cli/docker_cli_import_test.go b/integration-cli/docker_cli_import_test.go
index 201dbaa580..f4bd085214 100644
--- a/integration-cli/docker_cli_import_test.go
+++ b/integration-cli/docker_cli_import_test.go
@@ -39,3 +39,14 @@ func (s *DockerSuite) TestImportDisplay(c *check.C) {
}
}
+
+func (s *DockerSuite) TestImportBadURL(c *check.C) {
+ runCmd := exec.Command(dockerBinary, "import", "http://nourl/bad")
+ out, _, err := runCommandWithOutput(runCmd)
+ if err == nil {
+ c.Fatal("import was supposed to fail but didn't")
+ }
+ if !strings.Contains(out, "dial tcp") {
+ c.Fatalf("expected an error msg but didn't get one:\n%s", out)
+ }
+}
diff --git a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go
index a7a931e852..cf7738d14f 100644
--- a/integration-cli/docker_cli_info_test.go
+++ b/integration-cli/docker_cli_info_test.go
@@ -4,6 +4,7 @@ import (
"os/exec"
"strings"
+ "github.com/docker/docker/utils"
"github.com/go-check/check"
)
@@ -26,12 +27,16 @@ func (s *DockerSuite) TestInfoEnsureSucceeds(c *check.C) {
"CPUs:",
"Total Memory:",
"Kernel Version:",
- "Storage Driver:"}
+ "Storage Driver:",
+ }
+
+ if utils.ExperimentalBuild() {
+ stringsToCheck = append(stringsToCheck, "Experimental: true")
+ }
for _, linePrefix := range stringsToCheck {
if !strings.Contains(out, linePrefix) {
c.Errorf("couldn't find string %v in output", linePrefix)
}
}
-
}
diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go
index 58c61a9d0f..066e014b4d 100644
--- a/integration-cli/docker_cli_inspect_test.go
+++ b/integration-cli/docker_cli_inspect_test.go
@@ -12,13 +12,10 @@ import (
func (s *DockerSuite) TestInspectImage(c *check.C) {
imageTest := "emptyfs"
imageTestID := "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"
- imagesCmd := exec.Command(dockerBinary, "inspect", "--format='{{.Id}}'", imageTest)
- out, exitCode, err := runCommandWithOutput(imagesCmd)
- if exitCode != 0 || err != nil {
- c.Fatalf("failed to inspect image: %s, %v", out, err)
- }
+ id, err := inspectField(imageTest, "Id")
+ c.Assert(err, check.IsNil)
- if id := strings.TrimSuffix(out, "\n"); id != imageTestID {
+ if id != imageTestID {
c.Fatalf("Expected id: %s for image: %s but received id: %s", imageTestID, imageTest, id)
}
@@ -33,33 +30,28 @@ func (s *DockerSuite) TestInspectInt64(c *check.C) {
out = strings.TrimSpace(out)
- inspectCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.HostConfig.Memory}}", out)
- inspectOut, _, err := runCommandWithOutput(inspectCmd)
- if err != nil {
- c.Fatalf("failed to inspect container: %v, output: %q", err, inspectOut)
- }
+ inspectOut, err := inspectField(out, "HostConfig.Memory")
+ c.Assert(err, check.IsNil)
- if strings.TrimSpace(inspectOut) != "314572800" {
+ if inspectOut != "314572800" {
c.Fatalf("inspect got wrong value, got: %q, expected: 314572800", inspectOut)
}
}
func (s *DockerSuite) TestInspectImageFilterInt(c *check.C) {
imageTest := "emptyfs"
- imagesCmd := exec.Command(dockerBinary, "inspect", "--format='{{.Size}}'", imageTest)
- out, exitCode, err := runCommandWithOutput(imagesCmd)
- if exitCode != 0 || err != nil {
- c.Fatalf("failed to inspect image: %s, %v", out, err)
- }
- size, err := strconv.Atoi(strings.TrimSuffix(out, "\n"))
+ out, err := inspectField(imageTest, "Size")
+ c.Assert(err, check.IsNil)
+
+ size, err := strconv.Atoi(out)
if err != nil {
c.Fatalf("failed to inspect size of the image: %s, %v", out, err)
}
//now see if the size turns out to be the same
formatStr := fmt.Sprintf("--format='{{eq .Size %d}}'", size)
- imagesCmd = exec.Command(dockerBinary, "inspect", formatStr, imageTest)
- out, exitCode, err = runCommandWithOutput(imagesCmd)
+ imagesCmd := exec.Command(dockerBinary, "inspect", formatStr, imageTest)
+ out, exitCode, err := runCommandWithOutput(imagesCmd)
if exitCode != 0 || err != nil {
c.Fatalf("failed to inspect image: %s, %v", out, err)
}
@@ -69,7 +61,8 @@ func (s *DockerSuite) TestInspectImageFilterInt(c *check.C) {
}
func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) {
- runCmd := exec.Command("bash", "-c", `echo "blahblah" | docker run -i -a stdin busybox cat`)
+ runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat")
+ runCmd.Stdin = strings.NewReader("blahblah")
out, _, _, err := runCommandWithStdoutStderr(runCmd)
if err != nil {
c.Fatalf("failed to run container: %v, output: %q", err, out)
@@ -77,12 +70,10 @@ func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) {
id := strings.TrimSpace(out)
- runCmd = exec.Command(dockerBinary, "inspect", "--format='{{.State.ExitCode}}'", id)
- out, _, err = runCommandWithOutput(runCmd)
- if err != nil {
- c.Fatalf("failed to inspect container: %s, %v", out, err)
- }
- exitCode, err := strconv.Atoi(strings.TrimSuffix(out, "\n"))
+ out, err = inspectField(id, "State.ExitCode")
+ c.Assert(err, check.IsNil)
+
+ exitCode, err := strconv.Atoi(out)
if err != nil {
c.Fatalf("failed to inspect exitcode of the container: %s, %v", out, err)
}
diff --git a/integration-cli/docker_cli_kill_test.go b/integration-cli/docker_cli_kill_test.go
index aa11a782b5..47b4e47b9a 100644
--- a/integration-cli/docker_cli_kill_test.go
+++ b/integration-cli/docker_cli_kill_test.go
@@ -15,11 +15,7 @@ func (s *DockerSuite) TestKillContainer(c *check.C) {
}
cleanedContainerID := strings.TrimSpace(out)
-
- inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
- if out, _, err = runCommandWithOutput(inspectCmd); err != nil {
- c.Fatalf("out should've been a container id: %s, %v", out, err)
- }
+ c.Assert(waitRun(cleanedContainerID), check.IsNil)
killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
if out, _, err = runCommandWithOutput(killCmd); err != nil {
@@ -35,31 +31,22 @@ func (s *DockerSuite) TestKillContainer(c *check.C) {
if strings.Contains(out, cleanedContainerID) {
c.Fatal("killed container is still running")
}
-
- deleteContainer(cleanedContainerID)
-
}
func (s *DockerSuite) TestKillofStoppedContainer(c *check.C) {
runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
out, _, err := runCommandWithOutput(runCmd)
- if err != nil {
- c.Fatal(out, err)
- }
+ c.Assert(err, check.IsNil)
cleanedContainerID := strings.TrimSpace(out)
stopCmd := exec.Command(dockerBinary, "stop", cleanedContainerID)
- if out, _, err = runCommandWithOutput(stopCmd); err != nil {
- c.Fatalf("failed to stop container: %s, %v", out, err)
- }
+ out, _, err = runCommandWithOutput(stopCmd)
+ c.Assert(err, check.IsNil, check.Commentf("failed to stop container: %s, %v", out, err))
killCmd := exec.Command(dockerBinary, "kill", "-s", "30", cleanedContainerID)
- if _, _, err = runCommandWithOutput(killCmd); err == nil {
- c.Fatalf("kill succeeded on a stopped container")
- }
-
- deleteContainer(cleanedContainerID)
+ _, _, err = runCommandWithOutput(killCmd)
+ c.Assert(err, check.Not(check.IsNil), check.Commentf("Kill succeeded on a stopped container"))
}
func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) {
@@ -70,11 +57,7 @@ func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) {
}
cleanedContainerID := strings.TrimSpace(out)
-
- inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
- if out, _, err = runCommandWithOutput(inspectCmd); err != nil {
- c.Fatalf("out should've been a container id: %s, %v", out, err)
- }
+ c.Assert(waitRun(cleanedContainerID), check.IsNil)
killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
if out, _, err = runCommandWithOutput(killCmd); err != nil {
@@ -90,7 +73,4 @@ func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) {
if strings.Contains(out, cleanedContainerID) {
c.Fatal("killed container is still running")
}
-
- deleteContainer(cleanedContainerID)
-
}
diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go
index 6bb173c10c..1f7432a8bf 100644
--- a/integration-cli/docker_cli_links_test.go
+++ b/integration-cli/docker_cli_links_test.go
@@ -10,12 +10,10 @@ import (
"strings"
"time"
- "github.com/docker/docker/pkg/iptables"
"github.com/go-check/check"
)
func (s *DockerSuite) TestLinksEtcHostsRegularFile(c *check.C) {
-
runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts")
out, _, _, err := runCommandWithStdoutStderr(runCmd)
if err != nil {
@@ -111,31 +109,6 @@ func (s *DockerSuite) TestLinksPingLinkedContainersAfterRename(c *check.C) {
}
-func (s *DockerSuite) TestLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) {
- testRequires(c, SameHostDaemon)
-
- dockerCmd(c, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top")
- dockerCmd(c, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top")
-
- childIP := findContainerIP(c, "child")
- parentIP := findContainerIP(c, "parent")
-
- sourceRule := []string{"-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"}
- destinationRule := []string{"-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"}
- if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) {
- c.Fatal("Iptables rules not found")
- }
-
- dockerCmd(c, "rm", "--link", "parent/http")
- if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) {
- c.Fatal("Iptables rules should be removed when unlink")
- }
-
- dockerCmd(c, "kill", "child")
- dockerCmd(c, "kill", "parent")
-
-}
-
func (s *DockerSuite) TestLinksInspectLinksStarted(c *check.C) {
var (
expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}}
@@ -252,7 +225,7 @@ func (s *DockerSuite) TestLinksNetworkHostContainer(c *check.C) {
}
out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "should_fail", "--link", "host_container:tester", "busybox", "true"))
- if err == nil || !strings.Contains(out, "--net=host can't be used with links. This would result in undefined behavior.") {
+ if err == nil || !strings.Contains(out, "--net=host can't be used with links. This would result in undefined behavior") {
c.Fatalf("Running container linking to a container with --net host should have failed: %s", out)
}
@@ -331,3 +304,23 @@ func (s *DockerSuite) TestLinksEnvs(c *check.C) {
c.Fatalf("Incorrect output: %s", out)
}
}
+
+func (s *DockerSuite) TestLinkShortDefinition(c *check.C) {
+ runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "shortlinkdef", "busybox", "top")
+ out, _, err := runCommandWithOutput(runCmd)
+ c.Assert(err, check.IsNil)
+
+ cid := strings.TrimSpace(out)
+ c.Assert(waitRun(cid), check.IsNil)
+
+ runCmd = exec.Command(dockerBinary, "run", "-d", "--name", "link2", "--link", "shortlinkdef", "busybox", "top")
+ out, _, err = runCommandWithOutput(runCmd)
+ c.Assert(err, check.IsNil)
+
+ cid2 := strings.TrimSpace(out)
+ c.Assert(waitRun(cid2), check.IsNil)
+
+ links, err := inspectFieldJSON(cid2, "HostConfig.Links")
+ c.Assert(err, check.IsNil)
+ c.Assert(links, check.Equals, "[\"/shortlinkdef:/link2/shortlinkdef\"]")
+}
diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go
index 0a3e1af981..2e41f8a192 100644
--- a/integration-cli/docker_cli_logs_test.go
+++ b/integration-cli/docker_cli_logs_test.go
@@ -1,9 +1,12 @@
package main
import (
+ "encoding/json"
"fmt"
+ "io"
"os/exec"
"regexp"
+ "strconv"
"strings"
"time"
@@ -32,9 +35,6 @@ func (s *DockerSuite) TestLogsContainerSmallerThanPage(c *check.C) {
if len(out) != testLen+1 {
c.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out))
}
-
- deleteContainer(cleanedContainerID)
-
}
// Regression test: When going over the PageSize, it used to panic (gh#4851)
@@ -58,9 +58,6 @@ func (s *DockerSuite) TestLogsContainerBiggerThanPage(c *check.C) {
if len(out) != testLen+1 {
c.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out))
}
-
- deleteContainer(cleanedContainerID)
-
}
// Regression test: When going much over the PageSize, it used to block (gh#4851)
@@ -84,9 +81,6 @@ func (s *DockerSuite) TestLogsContainerMuchBiggerThanPage(c *check.C) {
if len(out) != testLen+1 {
c.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out))
}
-
- deleteContainer(cleanedContainerID)
-
}
func (s *DockerSuite) TestLogsTimestamps(c *check.C) {
@@ -126,9 +120,6 @@ func (s *DockerSuite) TestLogsTimestamps(c *check.C) {
}
}
}
-
- deleteContainer(cleanedContainerID)
-
}
func (s *DockerSuite) TestLogsSeparateStderr(c *check.C) {
@@ -157,9 +148,6 @@ func (s *DockerSuite) TestLogsSeparateStderr(c *check.C) {
if stderr != msg {
c.Fatalf("Expected %v in stderr stream, got %v", msg, stderr)
}
-
- deleteContainer(cleanedContainerID)
-
}
func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) {
@@ -188,9 +176,6 @@ func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) {
if stdout != msg {
c.Fatalf("Expected %v in stdout stream, got %v", msg, stdout)
}
-
- deleteContainer(cleanedContainerID)
-
}
func (s *DockerSuite) TestLogsTail(c *check.C) {
@@ -240,8 +225,6 @@ func (s *DockerSuite) TestLogsTail(c *check.C) {
if len(lines) != testLen+1 {
c.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines))
}
-
- deleteContainer(cleanedContainerID)
}
func (s *DockerSuite) TestLogsFollowStopped(c *check.C) {
@@ -272,8 +255,81 @@ func (s *DockerSuite) TestLogsFollowStopped(c *check.C) {
case <-time.After(1 * time.Second):
c.Fatal("Following logs is hanged")
}
+}
- deleteContainer(cleanedContainerID)
+func (s *DockerSuite) TestLogsSince(c *check.C) {
+ name := "testlogssince"
+ runCmd := exec.Command(dockerBinary, "run", "--name="+name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do sleep 2; echo `date +%s` log$i; done")
+ out, _, err := runCommandWithOutput(runCmd)
+ if err != nil {
+ c.Fatalf("run failed with errors: %s, %v", out, err)
+ }
+
+ log2Line := strings.Split(strings.Split(out, "\n")[1], " ")
+ t, err := strconv.ParseInt(log2Line[0], 10, 64) // the timestamp log2 is written
+ c.Assert(err, check.IsNil)
+ since := t + 1 // add 1s so log1 & log2 doesn't show up
+ logsCmd := exec.Command(dockerBinary, "logs", "-t", fmt.Sprintf("--since=%v", since), name)
+
+ out, _, err = runCommandWithOutput(logsCmd)
+ if err != nil {
+ c.Fatalf("failed to log container: %s, %v", out, err)
+ }
+
+ // Skip 2 seconds
+ unexpected := []string{"log1", "log2"}
+ for _, v := range unexpected {
+ if strings.Contains(out, v) {
+ c.Fatalf("unexpected log message returned=%v, since=%v\nout=%v", v, since, out)
+ }
+ }
+
+ // Test with default value specified and parameter omitted
+ expected := []string{"log1", "log2", "log3"}
+ for _, cmd := range []*exec.Cmd{
+ exec.Command(dockerBinary, "logs", "-t", name),
+ exec.Command(dockerBinary, "logs", "-t", "--since=0", name),
+ } {
+ out, _, err = runCommandWithOutput(cmd)
+ if err != nil {
+ c.Fatalf("failed to log container: %s, %v", out, err)
+ }
+ for _, v := range expected {
+ if !strings.Contains(out, v) {
+ c.Fatalf("'%v' does not contain=%v\nout=%s", cmd.Args, v, out)
+ }
+ }
+ }
+}
+
+func (s *DockerSuite) TestLogsSinceFutureFollow(c *check.C) {
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "/bin/sh", "-c", `for i in $(seq 1 5); do date +%s; sleep 1; done`)
+ out, _, err := runCommandWithOutput(runCmd)
+ if err != nil {
+ c.Fatalf("run failed with errors: %s, %v", out, err)
+ }
+ cleanedContainerID := strings.TrimSpace(out)
+
+ now := daemonTime(c).Unix()
+ since := now + 2
+ logCmd := exec.Command(dockerBinary, "logs", "-f", fmt.Sprintf("--since=%v", since), cleanedContainerID)
+ out, _, err = runCommandWithOutput(logCmd)
+ if err != nil {
+ c.Fatalf("failed to log container: %s, %v", out, err)
+ }
+ lines := strings.Split(strings.TrimSpace(out), "\n")
+ if len(lines) == 0 {
+ c.Fatal("got no log lines")
+ }
+ for _, v := range lines {
+ ts, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ c.Fatalf("cannot parse timestamp output from log: '%v'\nout=%s", v, out)
+ }
+ if ts < since {
+ c.Fatalf("earlier log found. since=%v logdate=%v", since, ts)
+ }
+ }
}
// Regression test for #8832
@@ -318,3 +374,54 @@ func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) {
}
}
+
+func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) {
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done")
+ id := strings.TrimSpace(out)
+ c.Assert(waitRun(id), check.IsNil)
+
+ type info struct {
+ NGoroutines int
+ }
+ getNGoroutines := func() int {
+ var i info
+ status, b, err := sockRequest("GET", "/info", nil)
+ c.Assert(err, check.IsNil)
+ c.Assert(status, check.Equals, 200)
+ c.Assert(json.Unmarshal(b, &i), check.IsNil)
+ return i.NGoroutines
+ }
+
+ nroutines := getNGoroutines()
+
+ cmd := exec.Command(dockerBinary, "logs", "-f", id)
+ r, w := io.Pipe()
+ cmd.Stdout = w
+ c.Assert(cmd.Start(), check.IsNil)
+
+ // Make sure pipe is written to
+ chErr := make(chan error)
+ go func() {
+ b := make([]byte, 1)
+ _, err := r.Read(b)
+ chErr <- err
+ }()
+ c.Assert(<-chErr, check.IsNil)
+ c.Assert(cmd.Process.Kill(), check.IsNil)
+
+ // NGoroutines is not updated right away, so we need to wait before failing
+ t := time.After(30 * time.Second)
+ for {
+ select {
+ case <-t:
+ if n := getNGoroutines(); n > nroutines {
+ c.Fatalf("leaked goroutines: expected less than or equal to %d, got: %d", nroutines, n)
+ }
+ default:
+ if n := getNGoroutines(); n <= nroutines {
+ return
+ }
+ time.Sleep(200 * time.Millisecond)
+ }
+ }
+}
diff --git a/integration-cli/docker_cli_nat_test.go b/integration-cli/docker_cli_nat_test.go
index 875b6540ab..2f8bd662c0 100644
--- a/integration-cli/docker_cli_nat_test.go
+++ b/integration-cli/docker_cli_nat_test.go
@@ -9,9 +9,22 @@ import (
"github.com/go-check/check"
)
-func (s *DockerSuite) TestNetworkNat(c *check.C) {
- testRequires(c, SameHostDaemon, NativeExecDriver)
+func startServerContainer(c *check.C, proto string, port int) string {
+ pStr := fmt.Sprintf("%d:%d", port, port)
+ bCmd := fmt.Sprintf("nc -lp %d && echo bye", port)
+ cmd := []string{"-d", "-p", pStr, "busybox", "sh", "-c", bCmd}
+ if proto == "udp" {
+ cmd = append(cmd, "-u")
+ }
+ name := "server"
+ if err := waitForContainer(name, cmd...); err != nil {
+ c.Fatalf("Failed to launch server container: %v", err)
+ }
+ return name
+}
+
+func getExternalAddress(c *check.C) net.IP {
iface, err := net.InterfaceByName("eth0")
if err != nil {
c.Skip(fmt.Sprintf("Test not running with `make test`. Interface eth0 not found: %v", err))
@@ -27,35 +40,72 @@ func (s *DockerSuite) TestNetworkNat(c *check.C) {
c.Fatalf("Error retrieving the up for eth0: %s", err)
}
- runCmd := exec.Command(dockerBinary, "run", "-dt", "-p", "8080:8080", "busybox", "nc", "-lp", "8080")
+ return ifaceIP
+}
+
+func getContainerLogs(c *check.C, containerID string) string {
+ runCmd := exec.Command(dockerBinary, "logs", containerID)
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
c.Fatal(out, err)
}
-
- cleanedContainerID := strings.TrimSpace(out)
-
- runCmd = exec.Command(dockerBinary, "run", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | nc -w 30 %s 8080", ifaceIP))
- out, _, err = runCommandWithOutput(runCmd)
- if err != nil {
- c.Fatal(out, err)
- }
-
- runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID)
- out, _, err = runCommandWithOutput(runCmd)
- if err != nil {
- c.Fatalf("failed to retrieve logs for container: %s, %v", out, err)
- }
-
- out = strings.Trim(out, "\r\n")
-
- if expected := "hello world"; out != expected {
- c.Fatalf("Unexpected output. Expected: %q, received: %q for iface %s", expected, out, ifaceIP)
- }
-
- killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
- if out, _, err = runCommandWithOutput(killCmd); err != nil {
- c.Fatalf("failed to kill container: %s, %v", out, err)
- }
-
+ return strings.Trim(out, "\r\n")
+}
+
+func getContainerStatus(c *check.C, containerID string) string {
+ out, err := inspectField(containerID, "State.Running")
+ c.Assert(err, check.IsNil)
+ return out
+}
+
+func (s *DockerSuite) TestNetworkNat(c *check.C) {
+ testRequires(c, SameHostDaemon, NativeExecDriver)
+
+ srv := startServerContainer(c, "tcp", 8080)
+
+ // Spawn a new container which connects to the server through the
+ // interface address.
+ endpoint := getExternalAddress(c)
+ runCmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | nc -w 30 %s 8080", endpoint))
+ if out, _, err := runCommandWithOutput(runCmd); err != nil {
+ c.Fatalf("Failed to connect to server: %v (output: %q)", err, string(out))
+ }
+
+ result := getContainerLogs(c, srv)
+
+ // Ideally we'd like to check for "hello world" but sometimes
+ // nc doesn't show the data it received so instead let's look for
+ // the output of the 'echo bye' that should be printed once
+ // the nc command gets a connection
+ expected := "bye"
+ if !strings.Contains(result, expected) {
+ c.Fatalf("Unexpected output. Expected: %q, received: %q", expected, result)
+ }
+}
+
+func (s *DockerSuite) TestNetworkLocalhostTCPNat(c *check.C) {
+ testRequires(c, SameHostDaemon, NativeExecDriver)
+
+ srv := startServerContainer(c, "tcp", 8081)
+
+ // Attempt to connect from the host to the listening container.
+ conn, err := net.Dial("tcp", "localhost:8081")
+ if err != nil {
+ c.Fatalf("Failed to connect to container (%v)", err)
+ }
+ if _, err := conn.Write([]byte("hello world\n")); err != nil {
+ c.Fatal(err)
+ }
+ conn.Close()
+
+ result := getContainerLogs(c, srv)
+
+ // Ideally we'd like to check for "hello world" but sometimes
+ // nc doesn't show the data it received so instead let's look for
+ // the output of the 'echo bye' that should be printed once
+ // the nc command gets a connection
+ expected := "bye"
+ if !strings.Contains(result, expected) {
+ c.Fatalf("Unexpected output. Expected: %q, received: %q", expected, result)
+ }
}
diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go
index bb34575fba..a9d36be491 100644
--- a/integration-cli/docker_cli_ps_test.go
+++ b/integration-cli/docker_cli_ps_test.go
@@ -304,7 +304,7 @@ func (s *DockerSuite) TestPsListContainersSize(c *check.C) {
}
expectedSize := fmt.Sprintf("%d B", (2 + baseBytes))
foundSize := lines[1][sizeIndex:]
- if foundSize != expectedSize {
+ if !strings.Contains(foundSize, expectedSize) {
c.Fatalf("Expected size %q, got %q", expectedSize, foundSize)
}
@@ -666,3 +666,17 @@ func (s *DockerSuite) TestPsGroupPortRange(c *check.C) {
}
}
+
+func (s *DockerSuite) TestPsWithSize(c *check.C) {
+ out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "sizetest", "busybox", "top"))
+ if err != nil {
+ c.Fatal(out, err)
+ }
+ out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "ps", "--size"))
+ if err != nil {
+ c.Fatal(out, err)
+ }
+ if !strings.Contains(out, "virtual") {
+ c.Fatalf("docker ps with --size should show virtual size of container")
+ }
+}
diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go
index 69a05ed821..ca971807f9 100644
--- a/integration-cli/docker_cli_push_test.go
+++ b/integration-cli/docker_cli_push_test.go
@@ -1,6 +1,7 @@
package main
import (
+ "archive/tar"
"fmt"
"io/ioutil"
"os"
@@ -8,7 +9,6 @@ import (
"strings"
"time"
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
"github.com/go-check/check"
)
diff --git a/integration-cli/docker_cli_restart_test.go b/integration-cli/docker_cli_restart_test.go
index 2b9d5e2323..fd95fd0226 100644
--- a/integration-cli/docker_cli_restart_test.go
+++ b/integration-cli/docker_cli_restart_test.go
@@ -112,11 +112,8 @@ func (s *DockerSuite) TestRestartWithVolumes(c *check.C) {
c.Errorf("expect 1 volume received %s", out)
}
- runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID)
- volumes, _, err := runCommandWithOutput(runCmd)
- if err != nil {
- c.Fatal(volumes, err)
- }
+ volumes, err := inspectField(cleanedContainerID, ".Volumes")
+ c.Assert(err, check.IsNil)
runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID)
if out, _, err = runCommandWithOutput(runCmd); err != nil {
@@ -133,15 +130,10 @@ func (s *DockerSuite) TestRestartWithVolumes(c *check.C) {
c.Errorf("expect 1 volume after restart received %s", out)
}
- runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID)
- volumesAfterRestart, _, err := runCommandWithOutput(runCmd)
- if err != nil {
- c.Fatal(volumesAfterRestart, err)
- }
+ volumesAfterRestart, err := inspectField(cleanedContainerID, ".Volumes")
+ c.Assert(err, check.IsNil)
if volumes != volumesAfterRestart {
- volumes = strings.Trim(volumes, " \n\r")
- volumesAfterRestart = strings.Trim(volumesAfterRestart, " \n\r")
c.Errorf("expected volume path: %s Actual path: %s", volumes, volumesAfterRestart)
}
@@ -157,9 +149,7 @@ func (s *DockerSuite) TestRestartPolicyNO(c *check.C) {
id := strings.TrimSpace(string(out))
name, err := inspectField(id, "HostConfig.RestartPolicy.Name")
- if err != nil {
- c.Fatal(err, out)
- }
+ c.Assert(err, check.IsNil)
if name != "no" {
c.Fatalf("Container restart policy name is %s, expected %s", name, "no")
}
@@ -176,17 +166,13 @@ func (s *DockerSuite) TestRestartPolicyAlways(c *check.C) {
id := strings.TrimSpace(string(out))
name, err := inspectField(id, "HostConfig.RestartPolicy.Name")
- if err != nil {
- c.Fatal(err, out)
- }
+ c.Assert(err, check.IsNil)
if name != "always" {
c.Fatalf("Container restart policy name is %s, expected %s", name, "always")
}
MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
// MaximumRetryCount=0 if the restart policy is always
if MaximumRetryCount != "0" {
@@ -205,9 +191,7 @@ func (s *DockerSuite) TestRestartPolicyOnFailure(c *check.C) {
id := strings.TrimSpace(string(out))
name, err := inspectField(id, "HostConfig.RestartPolicy.Name")
- if err != nil {
- c.Fatal(err, out)
- }
+ c.Assert(err, check.IsNil)
if name != "on-failure" {
c.Fatalf("Container restart policy name is %s, expected %s", name, "on-failure")
}
@@ -226,16 +210,12 @@ func (s *DockerSuite) TestContainerRestartwithGoodContainer(c *check.C) {
c.Fatal(err)
}
count, err := inspectField(id, "RestartCount")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if count != "0" {
c.Fatalf("Container was restarted %s times, expected %d", count, 0)
}
MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if MaximumRetryCount != "3" {
c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3")
}
diff --git a/integration-cli/docker_cli_rm_test.go b/integration-cli/docker_cli_rm_test.go
index b8d1b843d1..f5884dc0fc 100644
--- a/integration-cli/docker_cli_rm_test.go
+++ b/integration-cli/docker_cli_rm_test.go
@@ -1,7 +1,6 @@
package main
import (
- "net/http"
"os"
"os/exec"
"strings"
@@ -54,16 +53,6 @@ func (s *DockerSuite) TestRmRunningContainer(c *check.C) {
}
-func (s *DockerSuite) TestRmRunningContainerCheckError409(c *check.C) {
-
- createRunningContainer(c, "foo")
-
- endpoint := "/containers/foo"
- status, _, err := sockRequest("DELETE", endpoint, nil)
- c.Assert(status, check.Equals, http.StatusConflict)
- c.Assert(err, check.IsNil)
-}
-
func (s *DockerSuite) TestRmForceRemoveRunningContainer(c *check.C) {
createRunningContainer(c, "foo")
@@ -116,8 +105,8 @@ func (s *DockerSuite) TestRmContainerOrphaning(c *check.C) {
func (s *DockerSuite) TestRmInvalidContainer(c *check.C) {
if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rm", "unknown")); err == nil {
c.Fatal("Expected error on rm unknown container, got none")
- } else if !strings.Contains(out, "failed to remove one or more containers") {
- c.Fatalf("Expected output to contain 'failed to remove one or more containers', got %q", out)
+ } else if !strings.Contains(out, "failed to remove containers") {
+ c.Fatalf("Expected output to contain 'failed to remove containers', got %q", out)
}
}
diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go
index 9dc2ee297a..c7d0ca8047 100644
--- a/integration-cli/docker_cli_rmi_test.go
+++ b/integration-cli/docker_cli_rmi_test.go
@@ -35,9 +35,6 @@ func (s *DockerSuite) TestRmiWithContainerFails(c *check.C) {
if !strings.Contains(images, "busybox") {
c.Fatalf("The name 'busybox' should not have been removed from images: %q", images)
}
-
- deleteContainer(cleanedContainerID)
-
}
func (s *DockerSuite) TestRmiTag(c *check.C) {
@@ -101,8 +98,8 @@ func (s *DockerSuite) TestRmiImgIDForce(c *check.C) {
c.Fatalf("tag busybox to create 4 more images with same imageID; docker images shows: %q\n", imagesAfter)
}
}
- out, _ = dockerCmd(c, "inspect", "-f", "{{.Id}}", "busybox-test")
- imgID := strings.TrimSpace(out)
+ imgID, err := inspectField("busybox-test", "Id")
+ c.Assert(err, check.IsNil)
// first checkout without force it fails
runCmd = exec.Command(dockerBinary, "rmi", imgID)
diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go
index 0cf5c31eee..1c47e53dfc 100644
--- a/integration-cli/docker_cli_run_test.go
+++ b/integration-cli/docker_cli_run_test.go
@@ -19,7 +19,7 @@ import (
"time"
"github.com/docker/docker/nat"
- "github.com/docker/docker/pkg/resolvconf"
+ "github.com/docker/libnetwork/resolvconf"
"github.com/go-check/check"
)
@@ -87,7 +87,7 @@ func (s *DockerSuite) TestRunEchoStdoutWithCPUAndMemoryLimit(c *check.C) {
}
// "test" should be printed
-func (s *DockerSuite) TestRunEchoStdoutWitCPUQuota(c *check.C) {
+func (s *DockerSuite) TestRunEchoStdoutWithCPUQuota(c *check.C) {
runCmd := exec.Command(dockerBinary, "run", "--cpu-quota", "8000", "--name", "test", "busybox", "echo", "test")
out, _, _, err := runCommandWithStdoutStderr(runCmd)
if err != nil {
@@ -101,12 +101,9 @@ func (s *DockerSuite) TestRunEchoStdoutWitCPUQuota(c *check.C) {
c.Errorf("container should've printed 'test'")
}
- cmd := exec.Command(dockerBinary, "inspect", "-f", "{{.HostConfig.CpuQuota}}", "test")
- out, _, err = runCommandWithOutput(cmd)
- if err != nil {
- c.Fatalf("failed to inspect container: %s, %v", out, err)
- }
- out = strings.TrimSpace(out)
+ out, err = inspectField("test", "HostConfig.CpuQuota")
+ c.Assert(err, check.IsNil)
+
if out != "8000" {
c.Errorf("setting the CPU CFS quota failed")
}
@@ -123,10 +120,6 @@ func (s *DockerSuite) TestRunEchoNamedContainer(c *check.C) {
if out != "test\n" {
c.Errorf("container should've printed 'test'")
}
-
- if err := deleteContainer("testfoonamedcontainer"); err != nil {
- c.Errorf("failed to remove the named container: %v", err)
- }
}
// docker run should not leak file descriptors
@@ -179,19 +172,14 @@ func (s *DockerSuite) TestRunExitCodeOne(c *check.C) {
// it should be possible to pipe in data via stdin to a process running in a container
// some versions of lxc might make this test fail
func (s *DockerSuite) TestRunStdinPipe(c *check.C) {
- runCmd := exec.Command("bash", "-c", `echo "blahblah" | docker run -i -a stdin busybox cat`)
+ runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat")
+ runCmd.Stdin = strings.NewReader("blahblah")
out, _, _, err := runCommandWithStdoutStderr(runCmd)
if err != nil {
c.Fatalf("failed to run container: %v, output: %q", err, out)
}
out = strings.TrimSpace(out)
-
- inspectCmd := exec.Command(dockerBinary, "inspect", out)
- if out, _, err := runCommandWithOutput(inspectCmd); err != nil {
- c.Fatalf("out should've been a container id: %s %v", out, err)
- }
-
waitCmd := exec.Command(dockerBinary, "wait", out)
if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil {
c.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err)
@@ -224,12 +212,6 @@ func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) {
}
out = strings.TrimSpace(out)
-
- inspectCmd := exec.Command(dockerBinary, "inspect", out)
- if inspectOut, _, err := runCommandWithOutput(inspectCmd); err != nil {
- c.Fatalf("out should've been a container id: %s %v", inspectOut, err)
- }
-
waitCmd := exec.Command(dockerBinary, "wait", out)
if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil {
c.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err)
@@ -302,12 +284,8 @@ func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *check.C) {
if err != nil {
c.Fatalf("failed to run container: %v, output: %q", err, out)
}
- cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.NetworkSettings.IPAddress}}", "parent")
- ip, _, _, err := runCommandWithStdoutStderr(cmd)
- if err != nil {
- c.Fatalf("failed to inspect container: %v, output: %q", err, ip)
- }
- ip = strings.TrimSpace(ip)
+ ip, err := inspectField("parent", "NetworkSettings.IPAddress")
+ c.Assert(err, check.IsNil)
cmd = exec.Command(dockerBinary, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts")
out, _, err = runCommandWithOutput(cmd)
if err != nil {
@@ -326,12 +304,8 @@ func (s *DockerSuite) TestRunLinksContainerWithContainerId(c *check.C) {
c.Fatalf("failed to run container: %v, output: %q", err, cID)
}
cID = strings.TrimSpace(cID)
- cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.NetworkSettings.IPAddress}}", cID)
- ip, _, _, err := runCommandWithStdoutStderr(cmd)
- if err != nil {
- c.Fatalf("failed to inspect container: %v, output: %q", err, ip)
- }
- ip = strings.TrimSpace(ip)
+ ip, err := inspectField(cID, "NetworkSettings.IPAddress")
+ c.Assert(err, check.IsNil)
cmd = exec.Command(dockerBinary, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts")
out, _, err := runCommandWithOutput(cmd)
if err != nil {
@@ -371,6 +345,33 @@ func (s *DockerSuite) TestRunLinkToContainerNetMode(c *check.C) {
}
}
+func (s *DockerSuite) TestRunContainerNetModeWithDnsMacHosts(c *check.C) {
+ cmd := exec.Command(dockerBinary, "run", "-d", "--name", "parent", "busybox", "top")
+ out, _, err := runCommandWithOutput(cmd)
+ if err != nil {
+ c.Fatalf("failed to run container: %v, output: %q", err, out)
+ }
+
+ cmd = exec.Command(dockerBinary, "run", "--dns", "1.2.3.4", "--net=container:parent", "busybox")
+ out, _, err = runCommandWithOutput(cmd)
+ if err == nil || !strings.Contains(out, "Conflicting options: --dns and the network mode") {
+ c.Fatalf("run --net=container with --dns should error out")
+ }
+
+ cmd = exec.Command(dockerBinary, "run", "--mac-address", "92:d0:c6:0a:29:33", "--net=container:parent", "busybox")
+ out, _, err = runCommandWithOutput(cmd)
+ if err == nil || !strings.Contains(out, "--mac-address and the network mode") {
+ c.Fatalf("run --net=container with --mac-address should error out")
+ }
+
+ cmd = exec.Command(dockerBinary, "run", "--add-host", "test:192.168.2.109", "--net=container:parent", "busybox")
+ out, _, err = runCommandWithOutput(cmd)
+ if err == nil || !strings.Contains(out, "--add-host and the network mode") {
+ c.Fatalf("run --net=container with --add-host should error out")
+ }
+
+}
+
func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) {
testRequires(c, ExecSupport)
cmd := exec.Command(dockerBinary, "run", "-i", "-d", "--name", "parent", "busybox", "top")
@@ -394,21 +395,6 @@ func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) {
}
}
-// Regression test for #4741
-func (s *DockerSuite) TestRunWithVolumesAsFiles(c *check.C) {
- runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/etc/hosts:/target-file", "busybox", "true")
- out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd)
- if err != nil && exitCode != 0 {
- c.Fatal("1", out, stderr, err)
- }
-
- runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/target-file")
- out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd)
- if err != nil && exitCode != 0 {
- c.Fatal("2", out, stderr, err)
- }
-}
-
// Regression test for #4979
func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) {
runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file")
@@ -454,14 +440,6 @@ func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) {
}
}
-// Regression test for #4830
-func (s *DockerSuite) TestRunWithRelativePath(c *check.C) {
- runCmd := exec.Command(dockerBinary, "run", "-v", "tmp:/other-tmp", "busybox", "true")
- if _, _, _, err := runCommandWithStdoutStderr(runCmd); err == nil {
- c.Fatalf("relative path should result in an error")
- }
-}
-
func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) {
cmd := exec.Command(dockerBinary, "run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile")
if code, err := runCommand(cmd); err == nil || code == 0 {
@@ -535,7 +513,7 @@ func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) {
if out, _, err := runCommandWithOutput(cmd); err == nil {
c.Fatal("Expected error about duplicate volume definitions")
} else {
- if !strings.Contains(out, "Duplicate volume") {
+ if !strings.Contains(out, "Duplicate bind mount") {
c.Fatalf("Expected 'duplicate volume' error, got %v", err)
}
}
@@ -973,7 +951,7 @@ func (s *DockerSuite) TestRunPrivilegedCanMknod(c *check.C) {
}
}
-func (s *DockerSuite) TestRunUnPrivilegedCanMknod(c *check.C) {
+func (s *DockerSuite) TestRunUnprivilegedCanMknod(c *check.C) {
cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
out, _, err := runCommandWithOutput(cmd)
if err != nil {
@@ -1097,7 +1075,7 @@ func (s *DockerSuite) TestRunPrivilegedCanMount(c *check.C) {
}
}
-func (s *DockerSuite) TestRunUnPrivilegedCannotMount(c *check.C) {
+func (s *DockerSuite) TestRunUnprivilegedCannotMount(c *check.C) {
cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok")
out, _, err := runCommandWithOutput(cmd)
if err == nil {
@@ -1137,6 +1115,24 @@ func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) {
}
}
+func (s *DockerSuite) TestRunWithCpuPeriod(c *check.C) {
+ runCmd := exec.Command(dockerBinary, "run", "--cpu-period", "50000", "--name", "test", "busybox", "true")
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ if err != nil {
+ c.Fatalf("failed to run container: %v, output: %q", err, out)
+ }
+ out = strings.TrimSpace(out)
+ if strings.Contains(out, "Your kernel does not support CPU cfs period") {
+ c.Skip("Your kernel does not support CPU cfs period, skip this test")
+ }
+
+ out, err = inspectField("test", "HostConfig.CpuPeriod")
+ c.Assert(err, check.IsNil)
+ if out != "50000" {
+ c.Errorf("setting the CPU CFS period failed")
+ }
+}
+
func (s *DockerSuite) TestRunWithCpuset(c *check.C) {
cmd := exec.Command(dockerBinary, "run", "--cpuset", "0", "busybox", "true")
if code, err := runCommand(cmd); err != nil || code != 0 {
@@ -1158,6 +1154,20 @@ func (s *DockerSuite) TestRunWithCpusetMems(c *check.C) {
}
}
+func (s *DockerSuite) TestRunWithBlkioWeight(c *check.C) {
+ cmd := exec.Command(dockerBinary, "run", "--blkio-weight", "300", "busybox", "true")
+ if code, err := runCommand(cmd); err != nil || code != 0 {
+ c.Fatalf("container should run successfully with blkio-weight of 300: %s", err)
+ }
+}
+
+func (s *DockerSuite) TestRunWithBlkioInvalidWeight(c *check.C) {
+ cmd := exec.Command(dockerBinary, "run", "--blkio-weight", "5", "busybox", "true")
+ if _, err := runCommand(cmd); err == nil {
+ c.Fatalf("run with invalid blkio-weight should failed")
+ }
+}
+
func (s *DockerSuite) TestRunDeviceNumbers(c *check.C) {
cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "ls -l /dev/null")
out, _, err := runCommandWithOutput(cmd)
@@ -1422,14 +1432,38 @@ func (s *DockerSuite) TestRunDnsOptionsBasedOnHostResolvConf(c *check.C) {
}
}
-// Test the file watch notifier on docker host's /etc/resolv.conf
-// A go-routine is responsible for auto-updating containers which are
-// stopped and have an unmodified copy of resolv.conf, as well as
-// marking running containers as requiring an update on next restart
-func (s *DockerSuite) TestRunResolvconfUpdater(c *check.C) {
- // Because overlay doesn't support inotify properly, we need to skip
- // this test if the docker daemon has Storage Driver == overlay
- testRequires(c, SameHostDaemon, NotOverlay)
+// Test to see if a non-root user can resolve a DNS name and reach out to it. Also
+// check if the container resolv.conf file has atleast 0644 perm.
+func (s *DockerSuite) TestRunNonRootUserResolvName(c *check.C) {
+ testRequires(c, SameHostDaemon)
+ testRequires(c, Network)
+
+ cmd := exec.Command(dockerBinary, "run", "--name=testperm", "--user=default", "busybox", "ping", "-c", "1", "www.docker.io")
+ if out, err := runCommand(cmd); err != nil {
+ c.Fatal(err, out)
+ }
+
+ cID, err := getIDByName("testperm")
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ fmode := (os.FileMode)(0644)
+ finfo, err := os.Stat(containerStorageFile(cID, "resolv.conf"))
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ if (finfo.Mode() & fmode) != fmode {
+ c.Fatalf("Expected container resolv.conf mode to be atleast %s, instead got %s", fmode.String(), finfo.Mode().String())
+ }
+}
+
+// Test if container resolv.conf gets updated the next time it restarts
+// if host /etc/resolv.conf has changed. This only applies if the container
+// uses the host's /etc/resolv.conf and does not have any dns options provided.
+func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) {
+ testRequires(c, SameHostDaemon)
tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78")
tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1")
@@ -1455,7 +1489,7 @@ func (s *DockerSuite) TestRunResolvconfUpdater(c *check.C) {
}
}()
- //1. test that a non-running container gets an updated resolv.conf
+ //1. test that a restarting container gets an updated resolv.conf
cmd = exec.Command(dockerBinary, "run", "--name='first'", "busybox", "true")
if _, err := runCommand(cmd); err != nil {
c.Fatal(err)
@@ -1471,17 +1505,26 @@ func (s *DockerSuite) TestRunResolvconfUpdater(c *check.C) {
c.Fatal(err)
}
- time.Sleep(time.Second / 2)
+ // start the container again to pickup changes
+ cmd = exec.Command(dockerBinary, "start", "first")
+ if out, err := runCommand(cmd); err != nil {
+ c.Fatalf("Errored out %s, \nerror: %v", string(out), err)
+ }
+
// check for update in container
containerResolv, err := readContainerFile(containerID1, "resolv.conf")
if err != nil {
c.Fatal(err)
}
if !bytes.Equal(containerResolv, bytesResolvConf) {
- c.Fatalf("Stopped container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
+ c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
}
- //2. test that a non-running container does not receive resolv.conf updates
+ /* //make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
+ if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
+ c.Fatal(err)
+ } */
+ //2. test that a restarting container does not receive resolv.conf updates
// if it modified the container copy of the starting point resolv.conf
cmd = exec.Command(dockerBinary, "run", "--name='second'", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf")
if _, err = runCommand(cmd); err != nil {
@@ -1491,24 +1534,26 @@ func (s *DockerSuite) TestRunResolvconfUpdater(c *check.C) {
if err != nil {
c.Fatal(err)
}
- containerResolvHashBefore, err := readContainerFile(containerID2, "resolv.conf.hash")
- if err != nil {
- c.Fatal(err)
- }
//make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
c.Fatal(err)
}
- time.Sleep(time.Second / 2)
- containerResolvHashAfter, err := readContainerFile(containerID2, "resolv.conf.hash")
+ // start the container again
+ cmd = exec.Command(dockerBinary, "start", "second")
+ if out, err := runCommand(cmd); err != nil {
+ c.Fatalf("Errored out %s, \nerror: %v", string(out), err)
+ }
+
+ // check for update in container
+ containerResolv, err = readContainerFile(containerID2, "resolv.conf")
if err != nil {
c.Fatal(err)
}
- if !bytes.Equal(containerResolvHashBefore, containerResolvHashAfter) {
- c.Fatalf("Stopped container with modified resolv.conf should not have been updated; expected hash: %v, new hash: %v", containerResolvHashBefore, containerResolvHashAfter)
+ if bytes.Equal(containerResolv, resolvConfSystem) {
+ c.Fatalf("Restarting a container after container updated resolv.conf should not pick up host changes; expected %q, got %q", string(containerResolv), string(resolvConfSystem))
}
//3. test that a running container's resolv.conf is not modified while running
@@ -1519,26 +1564,19 @@ func (s *DockerSuite) TestRunResolvconfUpdater(c *check.C) {
}
runningContainerID := strings.TrimSpace(out)
- containerResolvHashBefore, err = readContainerFile(runningContainerID, "resolv.conf.hash")
- if err != nil {
- c.Fatal(err)
- }
-
// replace resolv.conf
if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
c.Fatal(err)
}
- // make sure the updater has time to run to validate we really aren't
- // getting updated
- time.Sleep(time.Second / 2)
- containerResolvHashAfter, err = readContainerFile(runningContainerID, "resolv.conf.hash")
+ // check for update in container
+ containerResolv, err = readContainerFile(runningContainerID, "resolv.conf")
if err != nil {
c.Fatal(err)
}
- if !bytes.Equal(containerResolvHashBefore, containerResolvHashAfter) {
- c.Fatalf("Running container's resolv.conf should not be updated; expected hash: %v, new hash: %v", containerResolvHashBefore, containerResolvHashAfter)
+ if bytes.Equal(containerResolv, bytesResolvConf) {
+ c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv))
}
//4. test that a running container's resolv.conf is updated upon restart
@@ -1554,7 +1592,7 @@ func (s *DockerSuite) TestRunResolvconfUpdater(c *check.C) {
c.Fatal(err)
}
if !bytes.Equal(containerResolv, bytesResolvConf) {
- c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
+ c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv))
}
//5. test that additions of a localhost resolver are cleaned from
@@ -1566,7 +1604,12 @@ func (s *DockerSuite) TestRunResolvconfUpdater(c *check.C) {
c.Fatal(err)
}
- time.Sleep(time.Second / 2)
+ // start the container again to pickup changes
+ cmd = exec.Command(dockerBinary, "start", "first")
+ if out, err := runCommand(cmd); err != nil {
+ c.Fatalf("Errored out %s, \nerror: %v", string(out), err)
+ }
+
// our first exited container ID should have been updated, but with default DNS
// after the cleanup of resolv.conf found only a localhost nameserver:
containerResolv, err = readContainerFile(containerID1, "resolv.conf")
@@ -1608,7 +1651,12 @@ func (s *DockerSuite) TestRunResolvconfUpdater(c *check.C) {
c.Fatal(err)
}
- time.Sleep(time.Second / 2)
+ // start the container again to pickup changes
+ cmd = exec.Command(dockerBinary, "start", "third")
+ if out, err := runCommand(cmd); err != nil {
+ c.Fatalf("Errored out %s, \nerror: %v", string(out), err)
+ }
+
// check for update in container
containerResolv, err = readContainerFile(containerID3, "resolv.conf")
if err != nil {
@@ -1671,7 +1719,7 @@ func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) {
// Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode
// but using --attach instead of -a to make sure we read the flag correctly
-func (s *DockerSuite) TestRunAttachWithDettach(c *check.C) {
+func (s *DockerSuite) TestRunAttachWithDetach(c *check.C) {
cmd := exec.Command(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true")
_, stderr, _, err := runCommandWithStdoutStderr(cmd)
if err == nil {
@@ -1690,16 +1738,12 @@ func (s *DockerSuite) TestRunState(c *check.C) {
}
id := strings.TrimSpace(out)
state, err := inspectField(id, "State.Running")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if state != "true" {
c.Fatal("Container state is 'not running'")
}
pid1, err := inspectField(id, "State.Pid")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if pid1 == "0" {
c.Fatal("Container state Pid 0")
}
@@ -1710,16 +1754,12 @@ func (s *DockerSuite) TestRunState(c *check.C) {
c.Fatal(err, out)
}
state, err = inspectField(id, "State.Running")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if state != "false" {
c.Fatal("Container state is 'running'")
}
pid2, err := inspectField(id, "State.Pid")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if pid2 == pid1 {
c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1)
}
@@ -1730,16 +1770,12 @@ func (s *DockerSuite) TestRunState(c *check.C) {
c.Fatal(err, out)
}
state, err = inspectField(id, "State.Running")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if state != "true" {
c.Fatal("Container state is 'not running'")
}
pid3, err := inspectField(id, "State.Pid")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if pid3 == pid1 {
c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1)
}
@@ -1988,7 +2024,7 @@ func (s *DockerSuite) TestRunWithBadDevice(c *check.C) {
if err == nil {
c.Fatal("Run should fail with bad device")
}
- expected := `\"/etc\": not a device node`
+ expected := `"/etc": not a device node`
if !strings.Contains(out, expected) {
c.Fatalf("Output should contain %q, actual out: %q", expected, out)
}
@@ -2115,9 +2151,7 @@ func (s *DockerSuite) TestRunNetworkNotInitializedNoneMode(c *check.C) {
}
id := strings.TrimSpace(out)
res, err := inspectField(id, "NetworkSettings.IPAddress")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if res != "" {
c.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res)
}
@@ -2146,9 +2180,7 @@ func (s *DockerSuite) TestRunInspectMacAddress(c *check.C) {
}
id := strings.TrimSpace(out)
inspectedMac, err := inspectField(id, "NetworkSettings.MacAddress")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if inspectedMac != mac {
c.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac)
}
@@ -2174,9 +2206,7 @@ func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) {
}
id := strings.TrimSpace(out)
ip, err := inspectField(id, "NetworkSettings.IPAddress")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
iptCmd := exec.Command("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip),
"!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT")
out, _, err = runCommandWithOutput(iptCmd)
@@ -2197,49 +2227,52 @@ func (s *DockerSuite) TestRunPortInUse(c *check.C) {
testRequires(c, SameHostDaemon)
port := "1234"
- l, err := net.Listen("tcp", ":"+port)
- if err != nil {
- c.Fatal(err)
- }
- defer l.Close()
cmd := exec.Command(dockerBinary, "run", "-d", "-p", port+":80", "busybox", "top")
out, _, err := runCommandWithOutput(cmd)
+ if err != nil {
+ c.Fatalf("Fail to run listening container")
+ }
+
+ cmd = exec.Command(dockerBinary, "run", "-d", "-p", port+":80", "busybox", "top")
+ out, _, err = runCommandWithOutput(cmd)
if err == nil {
c.Fatalf("Binding on used port must fail")
}
- if !strings.Contains(out, "address already in use") {
- c.Fatalf("Out must be about \"address already in use\", got %s", out)
+ if !strings.Contains(out, "port is already allocated") {
+ c.Fatalf("Out must be about \"port is already allocated\", got %s", out)
}
}
-// https://github.com/docker/docker/issues/8428
-func (s *DockerSuite) TestRunPortProxy(c *check.C) {
- testRequires(c, SameHostDaemon)
-
- port := "12345"
- cmd := exec.Command(dockerBinary, "run", "-d", "-p", port+":80", "busybox", "top")
-
+// https://github.com/docker/docker/issues/12148
+func (s *DockerSuite) TestRunAllocatePortInReservedRange(c *check.C) {
+ // allocate a dynamic port to get the most recent
+ cmd := exec.Command(dockerBinary, "run", "-d", "-P", "-p", "80", "busybox", "top")
out, _, err := runCommandWithOutput(cmd)
if err != nil {
- c.Fatalf("Failed to run and bind port %s, output: %s, error: %s", port, out, err)
+ c.Fatalf("Failed to run, output: %s, error: %s", out, err)
}
+ id := strings.TrimSpace(out)
- // connett for 10 times here. This will trigger 10 EPIPES in the child
- // process and kill it when it writes to a closed stdout/stderr
- for i := 0; i < 10; i++ {
- net.Dial("tcp", fmt.Sprintf("0.0.0.0:%s", port))
- }
-
- listPs := exec.Command("sh", "-c", "ps ax | grep docker")
- out, _, err = runCommandWithOutput(listPs)
+ cmd = exec.Command(dockerBinary, "port", id, "80")
+ out, _, err = runCommandWithOutput(cmd)
if err != nil {
- c.Errorf("list docker process failed with output %s, error %s", out, err)
+ c.Fatalf("Failed to get port, output: %s, error: %s", out, err)
}
- if strings.Contains(out, "docker ") {
- c.Errorf("Unexpected defunct docker process")
+ strPort := strings.Split(strings.TrimSpace(out), ":")[1]
+ port, err := strconv.ParseInt(strPort, 10, 64)
+ if err != nil {
+ c.Fatalf("invalid port, got: %s, error: %s", strPort, err)
}
- if !strings.Contains(out, "docker-proxy -proto tcp -host-ip 0.0.0.0 -host-port 12345") {
- c.Errorf("Failed to find docker-proxy process, got %s", out)
+
+ // allocate a static port and a dynamic port together, with static port
+ // takes the next recent port in dynamic port range.
+ cmd = exec.Command(dockerBinary, "run", "-d", "-P",
+ "-p", "80",
+ "-p", fmt.Sprintf("%d:8080", port+1),
+ "busybox", "top")
+ out, _, err = runCommandWithOutput(cmd)
+ if err != nil {
+ c.Fatalf("Failed to run, output: %s, error: %s", out, err)
}
}
@@ -2277,7 +2310,13 @@ func (s *DockerSuite) TestRunMountOrdering(c *check.C) {
c.Fatal(err)
}
- cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp", tmpDir), "-v", fmt.Sprintf("%s:/tmp/foo", fooDir), "-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2), "-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir), "busybox:latest", "sh", "-c", "ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me")
+ cmd := exec.Command(dockerBinary, "run",
+ "-v", fmt.Sprintf("%s:/tmp", tmpDir),
+ "-v", fmt.Sprintf("%s:/tmp/foo", fooDir),
+ "-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2),
+ "-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir),
+ "busybox:latest", "sh", "-c",
+ "ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me")
out, _, err := runCommandWithOutput(cmd)
if err != nil {
c.Fatal(out, err)
@@ -2371,41 +2410,6 @@ func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) {
}
}
-func (s *DockerSuite) TestRunVolumesNotRecreatedOnStart(c *check.C) {
- testRequires(c, SameHostDaemon)
-
- // Clear out any remnants from other tests
- info, err := ioutil.ReadDir(volumesConfigPath)
- if err != nil {
- c.Fatal(err)
- }
- if len(info) > 0 {
- for _, f := range info {
- if err := os.RemoveAll(volumesConfigPath + "/" + f.Name()); err != nil {
- c.Fatal(err)
- }
- }
- }
-
- cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "--name", "lone_starr", "busybox")
- if _, err := runCommand(cmd); err != nil {
- c.Fatal(err)
- }
-
- cmd = exec.Command(dockerBinary, "start", "lone_starr")
- if _, err := runCommand(cmd); err != nil {
- c.Fatal(err)
- }
-
- info, err = ioutil.ReadDir(volumesConfigPath)
- if err != nil {
- c.Fatal(err)
- }
- if len(info) != 1 {
- c.Fatalf("Expected only 1 volume have %v", len(info))
- }
-}
-
func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) {
// just run with unknown image
cmd := exec.Command(dockerBinary, "run", "asdfsg")
@@ -2433,33 +2437,25 @@ func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
}
out, err := inspectFieldMap("dark_helmet", "Volumes", "/foo/")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if out != "" {
c.Fatalf("Found unexpected volume entry for '/foo/' in volumes\n%q", out)
}
out, err = inspectFieldMap("dark_helmet", "Volumes", "/foo")
- if err != nil {
- c.Fatal(err)
- }
- if !strings.Contains(out, volumesStoragePath) {
+ c.Assert(err, check.IsNil)
+ if !strings.Contains(out, volumesConfigPath) {
c.Fatalf("Volume was not defined for /foo\n%q", out)
}
out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar/")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if out != "" {
c.Fatalf("Found unexpected volume entry for '/bar/' in volumes\n%q", out)
}
out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar")
- if err != nil {
- c.Fatal(err)
- }
- if !strings.Contains(out, volumesStoragePath) {
+ c.Assert(err, check.IsNil)
+ if !strings.Contains(out, volumesConfigPath) {
c.Fatalf("Volume was not defined for /bar\n%q", out)
}
}
@@ -2495,9 +2491,7 @@ func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) {
}
id := strings.TrimSpace(out)
portstr, err := inspectFieldJSON(id, "NetworkSettings.Ports")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
var ports nat.PortMap
if err = unmarshalJSON([]byte(portstr), &ports); err != nil {
c.Fatal(err)
@@ -2511,9 +2505,6 @@ func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) {
c.Fatalf("Port is not mapped for the port %d", port)
}
}
- if err := deleteContainer(id); err != nil {
- c.Fatal(err)
- }
}
// test docker run expose a invalid port
@@ -2538,14 +2529,8 @@ func (s *DockerSuite) TestRunUnknownCommand(c *check.C) {
runCmd = exec.Command(dockerBinary, "start", cID)
_, _, _, _ = runCommandWithStdoutStderr(runCmd)
- runCmd = exec.Command(dockerBinary, "inspect", "--format={{.State.ExitCode}}", cID)
- rc, _, _, err2 := runCommandWithStdoutStderr(runCmd)
- rc = strings.TrimSpace(rc)
-
- if err2 != nil {
- c.Fatalf("Error getting status of container: %v", err2)
- }
-
+ rc, err := inspectField(cID, "State.ExitCode")
+ c.Assert(err, check.IsNil)
if rc == "0" {
c.Fatalf("ExitCode(%v) cannot be 0", rc)
}
@@ -2592,16 +2577,12 @@ func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) {
}
id := strings.TrimSpace(out)
state, err := inspectField(id, "State.Running")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if state != "true" {
c.Fatal("Container state is 'not running'")
}
pid1, err := inspectField(id, "State.Pid")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1))
if err != nil {
@@ -2627,6 +2608,23 @@ func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) {
}
}
+func (s *DockerSuite) TestRunModeIpcContainerNotRunning(c *check.C) {
+ testRequires(c, SameHostDaemon)
+
+ cmd := exec.Command(dockerBinary, "create", "busybox")
+ out, _, err := runCommandWithOutput(cmd)
+ if err != nil {
+ c.Fatal(err, out)
+ }
+ id := strings.TrimSpace(out)
+
+ cmd = exec.Command(dockerBinary, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox")
+ out, _, err = runCommandWithOutput(cmd)
+ if err == nil {
+		c.Fatalf("Run container with ipc mode container should fail with non-running container: %s", out)
+ }
+}
+
func (s *DockerSuite) TestContainerNetworkMode(c *check.C) {
testRequires(c, SameHostDaemon)
@@ -2640,9 +2638,7 @@ func (s *DockerSuite) TestContainerNetworkMode(c *check.C) {
c.Fatal(err)
}
pid1, err := inspectField(id, "State.Pid")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1))
if err != nil {
@@ -2699,6 +2695,37 @@ func (s *DockerSuite) TestRunModePidHost(c *check.C) {
}
}
+func (s *DockerSuite) TestRunModeUTSHost(c *check.C) {
+ testRequires(c, NativeExecDriver, SameHostDaemon)
+
+ hostUTS, err := os.Readlink("/proc/1/ns/uts")
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ cmd := exec.Command(dockerBinary, "run", "--uts=host", "busybox", "readlink", "/proc/self/ns/uts")
+ out2, _, err := runCommandWithOutput(cmd)
+ if err != nil {
+ c.Fatal(err, out2)
+ }
+
+ out2 = strings.Trim(out2, "\n")
+ if hostUTS != out2 {
+ c.Fatalf("UTS different with --uts=host %s != %s\n", hostUTS, out2)
+ }
+
+ cmd = exec.Command(dockerBinary, "run", "busybox", "readlink", "/proc/self/ns/uts")
+ out2, _, err = runCommandWithOutput(cmd)
+ if err != nil {
+ c.Fatal(err, out2)
+ }
+
+ out2 = strings.Trim(out2, "\n")
+ if hostUTS == out2 {
+ c.Fatalf("UTS should be different without --uts=host %s == %s\n", hostUTS, out2)
+ }
+}
+
func (s *DockerSuite) TestRunTLSverify(c *check.C) {
cmd := exec.Command(dockerBinary, "ps")
out, ec, err := runCommandWithOutput(cmd)
@@ -2865,9 +2892,7 @@ func (s *DockerSuite) TestRunAllowPortRangeThroughPublish(c *check.C) {
id := strings.TrimSpace(out)
portstr, err := inspectFieldJSON(id, "NetworkSettings.Ports")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
var ports nat.PortMap
err = unmarshalJSON([]byte(portstr), &ports)
for port, binding := range ports {
@@ -2905,12 +2930,8 @@ func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) {
if out, _, err := runCommandWithOutput(runCmd); err != nil {
c.Fatalf("failed to run container: %v, output: %q", err, out)
}
- cmd := exec.Command(dockerBinary, "inspect", "-f", "{{.HostConfig.RestartPolicy.Name}}", "test")
- out, _, err := runCommandWithOutput(cmd)
- if err != nil {
- c.Fatalf("failed to inspect container: %v, output: %q", err, out)
- }
- out = strings.Trim(out, "\r\n")
+ out, err := inspectField("test", "HostConfig.RestartPolicy.Name")
+ c.Assert(err, check.IsNil)
if out != "no" {
c.Fatalf("Set default restart policy failed")
}
@@ -2926,16 +2947,12 @@ func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) {
c.Fatal(err)
}
count, err := inspectField(id, "RestartCount")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if count != "3" {
c.Fatalf("Container was restarted %s times, expected %d", count, 3)
}
MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount")
- if err != nil {
- c.Fatal(err)
- }
+ c.Assert(err, check.IsNil)
if MaximumRetryCount != "3" {
c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3")
}
@@ -2951,7 +2968,15 @@ func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) {
func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) {
testRequires(c, NativeExecDriver)
- out, err := exec.Command(dockerBinary, "run", "--read-only", "--rm", "busybox", "touch", "/file").CombinedOutput()
+ for _, f := range []string{"/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname"} {
+ testReadOnlyFile(f, c)
+ }
+}
+
+func testReadOnlyFile(filename string, c *check.C) {
+ testRequires(c, NativeExecDriver)
+
+ out, err := exec.Command(dockerBinary, "run", "--read-only", "--rm", "busybox", "touch", filename).CombinedOutput()
if err == nil {
c.Fatal("expected container to error on run with read only error")
}
@@ -2961,6 +2986,42 @@ func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) {
}
}
+func (s *DockerSuite) TestRunContainerWithReadonlyEtcHostsAndLinkedContainer(c *check.C) {
+ testRequires(c, NativeExecDriver)
+
+ _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "test-etc-hosts-ro-linked", "busybox", "top"))
+ c.Assert(err, check.IsNil)
+
+ out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--read-only", "--link", "test-etc-hosts-ro-linked:testlinked", "busybox", "cat", "/etc/hosts"))
+ c.Assert(err, check.IsNil)
+
+ if !strings.Contains(string(out), "testlinked") {
+ c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled")
+ }
+}
+
+func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithDnsFlag(c *check.C) {
+ testRequires(c, NativeExecDriver)
+
+ out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--read-only", "--dns", "1.1.1.1", "busybox", "/bin/cat", "/etc/resolv.conf"))
+ c.Assert(err, check.IsNil)
+
+ if !strings.Contains(string(out), "1.1.1.1") {
+ c.Fatal("Expected /etc/resolv.conf to be updated even if --read-only enabled and --dns flag used")
+ }
+}
+
+func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithAddHostFlag(c *check.C) {
+ testRequires(c, NativeExecDriver)
+
+ out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--read-only", "--add-host", "testreadonly:127.0.0.1", "busybox", "/bin/cat", "/etc/hosts"))
+ c.Assert(err, check.IsNil)
+
+ if !strings.Contains(string(out), "testreadonly") {
+ c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled and --add-host flag used")
+ }
+}
+
func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) {
out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "voltest", "-v", "/foo", "busybox"))
if err != nil {
@@ -3042,3 +3103,86 @@ func (s *DockerSuite) TestRunPidHostWithChildIsKillable(c *check.C) {
c.Fatal("Kill container timed out")
}
}
+
+func (s *DockerSuite) TestRunWithTooSmallMemoryLimit(c *check.C) {
+ // this memory limit is 1 byte less than the min, which is 4MB
+ // https://github.com/docker/docker/blob/v1.5.0/daemon/create.go#L22
+ out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-m", "4194303", "busybox"))
+ if err == nil || !strings.Contains(out, "Minimum memory limit allowed is 4MB") {
+ c.Fatalf("expected run to fail when using too low a memory limit: %q", out)
+ }
+}
+
+func (s *DockerSuite) TestRunWriteToProcAsound(c *check.C) {
+ code, err := runCommand(exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "echo 111 >> /proc/asound/version"))
+ if err == nil || code == 0 {
+ c.Fatal("standard container should not be able to write to /proc/asound")
+ }
+}
+
+func (s *DockerSuite) TestRunReadProcTimer(c *check.C) {
+ testRequires(c, NativeExecDriver)
+ out, code, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "busybox", "cat", "/proc/timer_stats"))
+ if err != nil || code != 0 {
+ c.Fatal(err)
+ }
+ if strings.Trim(out, "\n ") != "" {
+ c.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out)
+ }
+}
+
+func (s *DockerSuite) TestRunReadProcLatency(c *check.C) {
+ testRequires(c, NativeExecDriver)
+ // some kernels don't have this configured so skip the test if this file is not found
+ // on the host running the tests.
+ if _, err := os.Stat("/proc/latency_stats"); err != nil {
+		c.Skip("kernel doesn't have latency_stats configured")
+ return
+ }
+ out, code, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "busybox", "cat", "/proc/latency_stats"))
+ if err != nil || code != 0 {
+ c.Fatal(err)
+ }
+ if strings.Trim(out, "\n ") != "" {
+ c.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out)
+ }
+}
+
+func (s *DockerSuite) TestMountIntoProc(c *check.C) {
+ testRequires(c, NativeExecDriver)
+ code, err := runCommand(exec.Command(dockerBinary, "run", "-v", "/proc//sys", "busybox", "true"))
+ if err == nil || code == 0 {
+ c.Fatal("container should not be able to mount into /proc")
+ }
+}
+
+func (s *DockerSuite) TestMountIntoSys(c *check.C) {
+ testRequires(c, NativeExecDriver)
+ _, err := runCommand(exec.Command(dockerBinary, "run", "-v", "/sys/fs/cgroup", "busybox", "true"))
+ if err != nil {
+ c.Fatal("container should be able to mount into /sys/fs/cgroup")
+ }
+}
+
+func (s *DockerSuite) TestTwoContainersInNetHost(c *check.C) {
+ dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top")
+ dockerCmd(c, "run", "-d", "--net=host", "--name=second", "busybox", "top")
+ dockerCmd(c, "stop", "first")
+ dockerCmd(c, "stop", "second")
+}
+
+func (s *DockerSuite) TestRunUnshareProc(c *check.C) {
+ testRequires(c, Apparmor)
+
+ name := "acidburn"
+ runCmd := exec.Command(dockerBinary, "run", "--name", name, "jess/unshare", "unshare", "-p", "-m", "-f", "-r", "--mount-proc=/proc", "mount")
+ if out, _, err := runCommandWithOutput(runCmd); err == nil || !strings.Contains(out, "Permission denied") {
+ c.Fatalf("unshare should have failed with permission denied, got: %s, %v", out, err)
+ }
+
+ name = "cereal"
+ runCmd = exec.Command(dockerBinary, "run", "--name", name, "jess/unshare", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc")
+ if out, _, err := runCommandWithOutput(runCmd); err == nil || !strings.Contains(out, "Permission denied") {
+ c.Fatalf("unshare should have failed with permission denied, got: %s, %v", out, err)
+ }
+}
diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go
index f83f6645ac..c538890398 100644
--- a/integration-cli/docker_cli_save_load_test.go
+++ b/integration-cli/docker_cli_save_load_test.go
@@ -15,29 +15,22 @@ import (
// save a repo using gz compression and try to load it using stdout
func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) {
- runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+ name := "test-save-xz-and-load-repo-stdout"
+ runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
c.Fatalf("failed to create a container: %v %v", out, err)
}
- cleanedContainerID := strings.TrimSpace(out)
-
repoName := "foobar-save-load-test-xz-gz"
- inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
- out, _, err = runCommandWithOutput(inspectCmd)
- if err != nil {
- c.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err)
- }
-
- commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+ commitCmd := exec.Command(dockerBinary, "commit", name, repoName)
out, _, err = runCommandWithOutput(commitCmd)
if err != nil {
c.Fatalf("failed to commit container: %v %v", out, err)
}
- inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+ inspectCmd := exec.Command(dockerBinary, "inspect", repoName)
before, _, err := runCommandWithOutput(inspectCmd)
if err != nil {
c.Fatalf("the repo should exist before saving it: %v %v", before, err)
@@ -71,29 +64,22 @@ func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) {
// save a repo using xz+gz compression and try to load it using stdout
func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) {
- runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+ name := "test-save-xz-gz-and-load-repo-stdout"
+ runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
c.Fatalf("failed to create a container: %v %v", out, err)
}
- cleanedContainerID := strings.TrimSpace(out)
-
repoName := "foobar-save-load-test-xz-gz"
- inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
- out, _, err = runCommandWithOutput(inspectCmd)
- if err != nil {
- c.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err)
- }
-
- commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+ commitCmd := exec.Command(dockerBinary, "commit", name, repoName)
out, _, err = runCommandWithOutput(commitCmd)
if err != nil {
c.Fatalf("failed to commit container: %v %v", out, err)
}
- inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+ inspectCmd := exec.Command(dockerBinary, "inspect", repoName)
before, _, err := runCommandWithOutput(inspectCmd)
if err != nil {
c.Fatalf("the repo should exist before saving it: %v %v", before, err)
@@ -121,10 +107,6 @@ func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) {
if err == nil {
c.Fatalf("the repo should not exist: %v", after)
}
-
- deleteContainer(cleanedContainerID)
- deleteImages(repoName)
-
}
func (s *DockerSuite) TestSaveSingleTag(c *check.C) {
@@ -207,28 +189,21 @@ func (s *DockerSuite) TestSaveImageId(c *check.C) {
// save a repo and try to load it using flags
func (s *DockerSuite) TestSaveAndLoadRepoFlags(c *check.C) {
- runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+ name := "test-save-and-load-repo-flags"
+ runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
c.Fatalf("failed to create a container: %s, %v", out, err)
}
-
- cleanedContainerID := strings.TrimSpace(out)
-
repoName := "foobar-save-load-test"
- inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
- if out, _, err = runCommandWithOutput(inspectCmd); err != nil {
- c.Fatalf("output should've been a container id: %s, %v", out, err)
- }
-
- commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+ commitCmd := exec.Command(dockerBinary, "commit", name, repoName)
deleteImages(repoName)
if out, _, err = runCommandWithOutput(commitCmd); err != nil {
c.Fatalf("failed to commit container: %s, %v", out, err)
}
- inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+ inspectCmd := exec.Command(dockerBinary, "inspect", repoName)
before, _, err := runCommandWithOutput(inspectCmd)
if err != nil {
c.Fatalf("the repo should exist before saving it: %s, %v", before, err)
@@ -251,7 +226,6 @@ func (s *DockerSuite) TestSaveAndLoadRepoFlags(c *check.C) {
if before != after {
c.Fatalf("inspect is not the same after a save / load")
}
-
}
func (s *DockerSuite) TestSaveMultipleNames(c *check.C) {
diff --git a/integration-cli/docker_cli_save_load_unix_test.go b/integration-cli/docker_cli_save_load_unix_test.go
index 658666d6b8..5a6f5809ab 100644
--- a/integration-cli/docker_cli_save_load_unix_test.go
+++ b/integration-cli/docker_cli_save_load_unix_test.go
@@ -4,10 +4,9 @@ package main
import (
"bytes"
- "fmt"
+ "io/ioutil"
"os"
"os/exec"
- "strings"
"github.com/docker/docker/vendor/src/github.com/kr/pty"
"github.com/go-check/check"
@@ -15,43 +14,45 @@ import (
// save a repo and try to load it using stdout
func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) {
- runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+ name := "test-save-and-load-repo-stdout"
+ runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
c.Fatalf("failed to create a container: %s, %v", out, err)
}
- cleanedContainerID := strings.TrimSpace(out)
-
repoName := "foobar-save-load-test"
- inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
- if out, _, err = runCommandWithOutput(inspectCmd); err != nil {
- c.Fatalf("output should've been a container id: %s, %v", out, err)
- }
-
- commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+ commitCmd := exec.Command(dockerBinary, "commit", name, repoName)
if out, _, err = runCommandWithOutput(commitCmd); err != nil {
c.Fatalf("failed to commit container: %s, %v", out, err)
}
- inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+ inspectCmd := exec.Command(dockerBinary, "inspect", repoName)
before, _, err := runCommandWithOutput(inspectCmd)
if err != nil {
c.Fatalf("the repo should exist before saving it: %s, %v", before, err)
}
- saveCmdTemplate := `%v save %v > /tmp/foobar-save-load-test.tar`
- saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName)
- saveCmd := exec.Command("bash", "-c", saveCmdFinal)
- if out, _, err = runCommandWithOutput(saveCmd); err != nil {
- c.Fatalf("failed to save repo: %s, %v", out, err)
+ tmpFile, err := ioutil.TempFile("", "foobar-save-load-test.tar")
+ c.Assert(err, check.IsNil)
+ defer os.Remove(tmpFile.Name())
+
+ saveCmd := exec.Command(dockerBinary, "save", repoName)
+ saveCmd.Stdout = tmpFile
+
+ if _, err = runCommand(saveCmd); err != nil {
+ c.Fatalf("failed to save repo: %v", err)
}
+ tmpFile, err = os.Open(tmpFile.Name())
+ c.Assert(err, check.IsNil)
+
deleteImages(repoName)
- loadCmdFinal := `cat /tmp/foobar-save-load-test.tar | docker load`
- loadCmd := exec.Command("bash", "-c", loadCmdFinal)
+ loadCmd := exec.Command(dockerBinary, "load")
+ loadCmd.Stdin = tmpFile
+
if out, _, err = runCommandWithOutput(loadCmd); err != nil {
c.Fatalf("failed to load repo: %s, %v", out, err)
}
@@ -66,11 +67,8 @@ func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) {
c.Fatalf("inspect is not the same after a save / load")
}
- deleteContainer(cleanedContainerID)
deleteImages(repoName)
- os.Remove("/tmp/foobar-save-load-test.tar")
-
pty, tty, err := pty.Open()
if err != nil {
c.Fatalf("Could not open pty: %v", err)
diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go
index c5ecdd03b9..da298a1e0c 100644
--- a/integration-cli/docker_cli_search_test.go
+++ b/integration-cli/docker_cli_search_test.go
@@ -63,6 +63,16 @@ func (s *DockerSuite) TestSearchCmdOptions(c *check.C) {
c.Fatalf("failed to search on the central registry: %s, %v", outSearchCmd, err)
}
+ searchCmdNotrunc := exec.Command(dockerBinary, "search", "--no-trunc=true", "busybox")
+ outSearchCmdNotrunc, _, err := runCommandWithOutput(searchCmdNotrunc)
+ if err != nil {
+ c.Fatalf("failed to search on the central registry: %s, %v", outSearchCmdNotrunc, err)
+ }
+
+ if len(outSearchCmd) > len(outSearchCmdNotrunc) {
+		c.Fatalf("The --no-trunc option did not take effect: truncated output is longer than full output")
+ }
+
searchCmdautomated := exec.Command(dockerBinary, "search", "--automated=true", "busybox")
outSearchCmdautomated, exitCode, err := runCommandWithOutput(searchCmdautomated) //The busybox is a busybox base image, not an AUTOMATED image.
if err != nil || exitCode != 0 {
diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go
index fddc8c97bb..0475826738 100644
--- a/integration-cli/docker_cli_start_test.go
+++ b/integration-cli/docker_cli_start_test.go
@@ -98,9 +98,7 @@ func (s *DockerSuite) TestStartRecordError(c *check.C) {
// when container runs successfully, we should not have state.Error
dockerCmd(c, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top")
stateErr, err := inspectField("test", "State.Error")
- if err != nil {
- c.Fatalf("Failed to inspect %q state's error, got error %q", "test", err)
- }
+ c.Assert(err, check.IsNil)
if stateErr != "" {
c.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr)
}
@@ -111,9 +109,7 @@ func (s *DockerSuite) TestStartRecordError(c *check.C) {
c.Fatalf("Expected error but got none, output %q", out)
}
stateErr, err = inspectField("test2", "State.Error")
- if err != nil {
- c.Fatalf("Failed to inspect %q state's error, got error %q", "test2", err)
- }
+ c.Assert(err, check.IsNil)
expected := "port is already allocated"
if stateErr == "" || !strings.Contains(stateErr, expected) {
c.Fatalf("State.Error(%q) does not include %q", stateErr, expected)
@@ -123,41 +119,13 @@ func (s *DockerSuite) TestStartRecordError(c *check.C) {
dockerCmd(c, "stop", "test")
dockerCmd(c, "start", "test2")
stateErr, err = inspectField("test2", "State.Error")
- if err != nil {
- c.Fatalf("Failed to inspect %q state's error, got error %q", "test", err)
- }
+ c.Assert(err, check.IsNil)
if stateErr != "" {
c.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr)
}
}
-// gh#8726: a failed Start() breaks --volumes-from on subsequent Start()'s
-func (s *DockerSuite) TestStartVolumesFromFailsCleanly(c *check.C) {
-
- // Create the first data volume
- dockerCmd(c, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox")
-
- // Expect this to fail because the data test after contaienr doesn't exist yet
- if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil {
- c.Fatal("Expected error but got none")
- }
-
- // Create the second data volume
- dockerCmd(c, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox")
-
- // Now, all the volumes should be there
- dockerCmd(c, "start", "consumer")
-
- // Check that we have the volumes we want
- out, _ := dockerCmd(c, "inspect", "--format='{{ len .Volumes }}'", "consumer")
- nVolumes := strings.Trim(out, " \r\n'")
- if nVolumes != "2" {
- c.Fatalf("Missing volumes: expected 2, got %s", nVolumes)
- }
-
-}
-
func (s *DockerSuite) TestStartPausedContainer(c *check.C) {
defer unpauseAllContainers()
@@ -196,12 +164,8 @@ func (s *DockerSuite) TestStartMultipleContainers(c *check.C) {
if out, _, err := runCommandWithOutput(cmd); err != nil {
c.Fatal(out, err)
}
- cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", "parent")
- out, _, err := runCommandWithOutput(cmd)
- if err != nil {
- c.Fatal(out, err)
- }
- out = strings.Trim(out, "\r\n")
+ out, err := inspectField("parent", "State.Running")
+ c.Assert(err, check.IsNil)
if out != "false" {
c.Fatal("Container should be stopped")
}
@@ -215,12 +179,8 @@ func (s *DockerSuite) TestStartMultipleContainers(c *check.C) {
}
for container, expected := range map[string]string{"parent": "true", "child_first": "false", "child_second": "true"} {
- cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", container)
- out, _, err = runCommandWithOutput(cmd)
- if err != nil {
- c.Fatal(out, err)
- }
- out = strings.Trim(out, "\r\n")
+ out, err := inspectField(container, "State.Running")
+ c.Assert(err, check.IsNil)
if out != expected {
c.Fatal("Container running state wrong")
}
@@ -260,12 +220,10 @@ func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) {
// confirm the state of all the containers be stopped
for container, expected := range map[string]string{"test1": "false", "test2": "false", "test3": "false"} {
- cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", container)
- out, _, err := runCommandWithOutput(cmd)
+ out, err := inspectField(container, "State.Running")
if err != nil {
c.Fatal(out, err)
}
- out = strings.Trim(out, "\r\n")
if out != expected {
c.Fatal("Container running state wrong")
}
diff --git a/integration-cli/docker_cli_start_volume_driver_unix_test.go b/integration-cli/docker_cli_start_volume_driver_unix_test.go
new file mode 100644
index 0000000000..1cc90801d6
--- /dev/null
+++ b/integration-cli/docker_cli_start_volume_driver_unix_test.go
@@ -0,0 +1,249 @@
+// +build experimental
+// +build !windows
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/go-check/check"
+)
+
+func init() {
+ check.Suite(&DockerExternalVolumeSuite{
+ ds: &DockerSuite{},
+ })
+}
+
+type eventCounter struct {
+ activations int
+ creations int
+ removals int
+ mounts int
+ unmounts int
+ paths int
+}
+
+type DockerExternalVolumeSuite struct {
+ server *httptest.Server
+ ds *DockerSuite
+ d *Daemon
+ ec *eventCounter
+}
+
+func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) {
+ s.d = NewDaemon(c)
+ s.ds.SetUpTest(c)
+ s.ec = &eventCounter{}
+
+}
+
+func (s *DockerExternalVolumeSuite) TearDownTest(c *check.C) {
+ s.d.Stop()
+ s.ds.TearDownTest(c)
+}
+
+func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) {
+ mux := http.NewServeMux()
+ s.server = httptest.NewServer(mux)
+
+	type pluginRequest struct {
+		Name string
+	}
+
+	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.activations++
+
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+		fmt.Fprintln(w, `{"Implements": ["VolumeDriver"]}`)
+	})
+
+	mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.creations++
+
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+		fmt.Fprintln(w, `{}`)
+	})
+
+	mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.removals++
+
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+		fmt.Fprintln(w, `{}`)
+	})
+
+	mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.paths++
+
+		var pr pluginRequest
+		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
+			http.Error(w, err.Error(), 500)
+		}
+
+		p := hostVolumePath(pr.Name)
+
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+		fmt.Fprintln(w, fmt.Sprintf("{\"Mountpoint\": \"%s\"}", p))
+	})
+
+	mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.mounts++
+
+		var pr pluginRequest
+		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
+			http.Error(w, err.Error(), 500)
+		}
+
+		p := hostVolumePath(pr.Name)
+		if err := os.MkdirAll(p, 0755); err != nil {
+			http.Error(w, err.Error(), 500)
+		}
+
+		if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.server.URL), 0644); err != nil {
+			http.Error(w, err.Error(), 500)
+		}
+
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+		fmt.Fprintln(w, fmt.Sprintf("{\"Mountpoint\": \"%s\"}", p))
+	})
+
+	mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.unmounts++
+
+		var pr pluginRequest
+		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
+			http.Error(w, err.Error(), 500)
+		}
+
+		p := hostVolumePath(pr.Name)
+		if err := os.RemoveAll(p); err != nil {
+			http.Error(w, err.Error(), 500)
+		}
+
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+		fmt.Fprintln(w, `{}`)
+	})
+
+ if err := os.MkdirAll("/usr/share/docker/plugins", 0755); err != nil {
+ c.Fatal(err)
+ }
+
+ if err := ioutil.WriteFile("/usr/share/docker/plugins/test-external-volume-driver.spec", []byte(s.server.URL), 0644); err != nil {
+ c.Fatal(err)
+ }
+}
+
+func (s *DockerExternalVolumeSuite) TearDownSuite(c *check.C) {
+ s.server.Close()
+
+ if err := os.RemoveAll("/usr/share/docker/plugins"); err != nil {
+ c.Fatal(err)
+ }
+}
+
+func (s *DockerExternalVolumeSuite) TestStartExternalNamedVolumeDriver(c *check.C) {
+ if err := s.d.StartWithBusybox(); err != nil {
+ c.Fatal(err)
+ }
+
+ out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test")
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ if !strings.Contains(out, s.server.URL) {
+ c.Fatalf("External volume mount failed. Output: %s\n", out)
+ }
+
+ p := hostVolumePath("external-volume-test")
+ _, err = os.Lstat(p)
+ if err == nil {
+ c.Fatalf("Expected error checking volume path in host: %s\n", p)
+ }
+
+ if !os.IsNotExist(err) {
+ c.Fatalf("Expected volume path in host to not exist: %s, %v\n", p, err)
+ }
+
+ c.Assert(s.ec.activations, check.Equals, 1)
+ c.Assert(s.ec.creations, check.Equals, 1)
+ c.Assert(s.ec.removals, check.Equals, 1)
+ c.Assert(s.ec.mounts, check.Equals, 1)
+ c.Assert(s.ec.unmounts, check.Equals, 1)
+}
+
+func (s *DockerExternalVolumeSuite) TestStartExternalVolumeUnnamedDriver(c *check.C) {
+ if err := s.d.StartWithBusybox(); err != nil {
+ c.Fatal(err)
+ }
+
+ out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test")
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ if !strings.Contains(out, s.server.URL) {
+ c.Fatalf("External volume mount failed. Output: %s\n", out)
+ }
+
+ c.Assert(s.ec.activations, check.Equals, 1)
+ c.Assert(s.ec.creations, check.Equals, 1)
+ c.Assert(s.ec.removals, check.Equals, 1)
+ c.Assert(s.ec.mounts, check.Equals, 1)
+ c.Assert(s.ec.unmounts, check.Equals, 1)
+}
+
+func (s *DockerExternalVolumeSuite) TestStartExternalVolumeDriverVolumesFrom(c *check.C) {
+ if err := s.d.StartWithBusybox(); err != nil {
+ c.Fatal(err)
+ }
+
+ if _, err := s.d.Cmd("run", "-d", "--name", "vol-test1", "-v", "/foo", "--volume-driver", "test-external-volume-driver", "busybox:latest"); err != nil {
+ c.Fatal(err)
+ }
+
+ if _, err := s.d.Cmd("run", "--rm", "--volumes-from", "vol-test1", "--name", "vol-test2", "busybox", "ls", "/tmp"); err != nil {
+ c.Fatal(err)
+ }
+
+ if _, err := s.d.Cmd("rm", "-f", "vol-test1"); err != nil {
+ c.Fatal(err)
+ }
+
+ c.Assert(s.ec.activations, check.Equals, 1)
+ c.Assert(s.ec.creations, check.Equals, 2)
+ c.Assert(s.ec.removals, check.Equals, 1)
+ c.Assert(s.ec.mounts, check.Equals, 2)
+ c.Assert(s.ec.unmounts, check.Equals, 2)
+}
+
+func (s *DockerExternalVolumeSuite) TestStartExternalVolumeDriverDeleteContainer(c *check.C) {
+ if err := s.d.StartWithBusybox(); err != nil {
+ c.Fatal(err)
+ }
+
+ if _, err := s.d.Cmd("run", "-d", "--name", "vol-test1", "-v", "/foo", "--volume-driver", "test-external-volume-driver", "busybox:latest"); err != nil {
+ c.Fatal(err)
+ }
+
+ if _, err := s.d.Cmd("rm", "-fv", "vol-test1"); err != nil {
+ c.Fatal(err)
+ }
+
+ c.Assert(s.ec.activations, check.Equals, 1)
+ c.Assert(s.ec.creations, check.Equals, 1)
+ c.Assert(s.ec.removals, check.Equals, 1)
+ c.Assert(s.ec.mounts, check.Equals, 1)
+ c.Assert(s.ec.unmounts, check.Equals, 1)
+}
+
+func hostVolumePath(name string) string {
+ return fmt.Sprintf("/var/lib/docker/volumes/%s", name)
+}
diff --git a/integration-cli/docker_cli_stats_test.go b/integration-cli/docker_cli_stats_test.go
new file mode 100644
index 0000000000..7664de5977
--- /dev/null
+++ b/integration-cli/docker_cli_stats_test.go
@@ -0,0 +1,36 @@
+package main
+
+import (
+ "os/exec"
+ "strings"
+ "time"
+
+ "github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestCliStatsNoStream(c *check.C) {
+ out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "busybox", "top"))
+ if err != nil {
+ c.Fatalf("Error on container creation: %v, output: %s", err, out)
+ }
+ id := strings.TrimSpace(out)
+ if err := waitRun(id); err != nil {
+ c.Fatalf("error waiting for container to start: %v", err)
+ }
+
+ statsCmd := exec.Command(dockerBinary, "stats", "--no-stream", id)
+ chErr := make(chan error)
+ go func() {
+ chErr <- statsCmd.Run()
+ }()
+
+ select {
+ case err := <-chErr:
+ if err != nil {
+ c.Fatalf("Error running stats: %v", err)
+ }
+ case <-time.After(2 * time.Second):
+ statsCmd.Process.Kill()
+ c.Fatalf("stats did not return immediately when not streaming")
+ }
+}
diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go
index 35225f9c1e..7db21a6f37 100644
--- a/integration-cli/docker_cli_tag_test.go
+++ b/integration-cli/docker_cli_tag_test.go
@@ -22,15 +22,10 @@ func (s *DockerSuite) TestTagUnprefixedRepoByName(c *check.C) {
// tagging an image by ID in a new unprefixed repo should work
func (s *DockerSuite) TestTagUnprefixedRepoByID(c *check.C) {
- getIDCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.Id}}", "busybox")
- out, _, err := runCommandWithOutput(getIDCmd)
- if err != nil {
- c.Fatalf("failed to get the image ID of busybox: %s, %v", out, err)
- }
-
- cleanedImageID := strings.TrimSpace(out)
- tagCmd := exec.Command(dockerBinary, "tag", cleanedImageID, "testfoobarbaz")
- if out, _, err = runCommandWithOutput(tagCmd); err != nil {
+ imageID, err := inspectField("busybox", "Id")
+ c.Assert(err, check.IsNil)
+ tagCmd := exec.Command(dockerBinary, "tag", imageID, "testfoobarbaz")
+ if out, _, err := runCommandWithOutput(tagCmd); err != nil {
c.Fatal(out, err)
}
}
@@ -116,6 +111,30 @@ func (s *DockerSuite) TestTagExistedNameWithForce(c *check.C) {
}
}
+func (s *DockerSuite) TestTagWithSuffixHyphen(c *check.C) {
+ if err := pullImageIfNotExist("busybox:latest"); err != nil {
+ c.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
+ }
+	// test repository name beginning with '-'
+ tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "-busybox:test")
+ out, _, err := runCommandWithOutput(tagCmd)
+ if err == nil || !strings.Contains(out, "Invalid repository name (-busybox). Cannot begin or end with a hyphen") {
+ c.Fatal("tag a name begin with '-' should failed")
+ }
+	// test namespace name beginning with '-'
+ tagCmd = exec.Command(dockerBinary, "tag", "busybox:latest", "-test/busybox:test")
+ out, _, err = runCommandWithOutput(tagCmd)
+ if err == nil || !strings.Contains(out, "Invalid namespace name (-test). Cannot begin or end with a hyphen") {
+ c.Fatal("tag a name begin with '-' should failed")
+ }
+	// test index name beginning with '-'
+ tagCmd = exec.Command(dockerBinary, "tag", "busybox:latest", "-index:5000/busybox:test")
+ out, _, err = runCommandWithOutput(tagCmd)
+ if err == nil || !strings.Contains(out, "Invalid index name (-index:5000). Cannot begin or end with a hyphen") {
+ c.Fatal("tag a name begin with '-' should failed")
+ }
+}
+
// ensure tagging using official names works
// ensure all tags result in the same name
func (s *DockerSuite) TestTagOfficialNames(c *check.C) {
diff --git a/integration-cli/docker_cli_top_test.go b/integration-cli/docker_cli_top_test.go
index f941a42cd0..f28e20ba33 100644
--- a/integration-cli/docker_cli_top_test.go
+++ b/integration-cli/docker_cli_top_test.go
@@ -54,8 +54,6 @@ func (s *DockerSuite) TestTopNonPrivileged(c *check.C) {
c.Fatalf("failed to kill container: %s, %v", out, err)
}
- deleteContainer(cleanedContainerID)
-
if !strings.Contains(out1, "top") && !strings.Contains(out2, "top") {
c.Fatal("top should've listed `top` in the process list, but failed twice")
} else if !strings.Contains(out1, "top") {
@@ -92,8 +90,6 @@ func (s *DockerSuite) TestTopPrivileged(c *check.C) {
c.Fatalf("failed to kill container: %s, %v", out, err)
}
- deleteContainer(cleanedContainerID)
-
if !strings.Contains(out1, "top") && !strings.Contains(out2, "top") {
c.Fatal("top should've listed `top` in the process list, but failed twice")
} else if !strings.Contains(out1, "top") {
diff --git a/integration-cli/docker_cli_wait_test.go b/integration-cli/docker_cli_wait_test.go
index 21f04faf0f..c6d469eed0 100644
--- a/integration-cli/docker_cli_wait_test.go
+++ b/integration-cli/docker_cli_wait_test.go
@@ -1,6 +1,7 @@
package main
import (
+ "bytes"
"os/exec"
"strings"
"time"
@@ -20,12 +21,8 @@ func (s *DockerSuite) TestWaitNonBlockedExitZero(c *check.C) {
status := "true"
for i := 0; status != "false"; i++ {
- runCmd = exec.Command(dockerBinary, "inspect", "--format='{{.State.Running}}'", containerID)
- status, _, err = runCommandWithOutput(runCmd)
- if err != nil {
- c.Fatal(status, err)
- }
- status = strings.TrimSpace(status)
+ status, err = inspectField(containerID, "State.Running")
+ c.Assert(err, check.IsNil)
time.Sleep(time.Second)
if i >= 60 {
@@ -44,7 +41,7 @@ func (s *DockerSuite) TestWaitNonBlockedExitZero(c *check.C) {
// blocking wait with 0 exit code
func (s *DockerSuite) TestWaitBlockedExitZero(c *check.C) {
- out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 0' SIGTERM; while true; do sleep 0.01; done")
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 0' TERM; while true; do sleep 0.01; done")
containerID := strings.TrimSpace(out)
if err := waitRun(containerID); err != nil {
@@ -83,12 +80,8 @@ func (s *DockerSuite) TestWaitNonBlockedExitRandom(c *check.C) {
status := "true"
for i := 0; status != "false"; i++ {
- runCmd = exec.Command(dockerBinary, "inspect", "--format='{{.State.Running}}'", containerID)
- status, _, err = runCommandWithOutput(runCmd)
- if err != nil {
- c.Fatal(status, err)
- }
- status = strings.TrimSpace(status)
+ status, err = inspectField(containerID, "State.Running")
+ c.Assert(err, check.IsNil)
time.Sleep(time.Second)
if i >= 60 {
@@ -107,7 +100,7 @@ func (s *DockerSuite) TestWaitNonBlockedExitRandom(c *check.C) {
// blocking wait with random exit code
func (s *DockerSuite) TestWaitBlockedExitRandom(c *check.C) {
- out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "trap 'exit 99' SIGTERM; while true; do sleep 0.01; done")
+ out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 99' TERM; while true; do sleep 0.01; done")
containerID := strings.TrimSpace(out)
if err := waitRun(containerID); err != nil {
c.Fatal(err)
@@ -116,21 +109,34 @@ func (s *DockerSuite) TestWaitBlockedExitRandom(c *check.C) {
c.Fatal(err)
}
- chWait := make(chan string)
+ chWait := make(chan error)
+ waitCmd := exec.Command(dockerBinary, "wait", containerID)
+ waitCmdOut := bytes.NewBuffer(nil)
+ waitCmd.Stdout = waitCmdOut
+ if err := waitCmd.Start(); err != nil {
+ c.Fatal(err)
+ }
+
go func() {
- out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "wait", containerID))
- chWait <- out
+ chWait <- waitCmd.Wait()
}()
- time.Sleep(100 * time.Millisecond)
dockerCmd(c, "stop", containerID)
select {
- case status := <-chWait:
+ case err := <-chWait:
+ if err != nil {
+ c.Fatal(err)
+ }
+ status, err := waitCmdOut.ReadString('\n')
+ if err != nil {
+ c.Fatal(err)
+ }
if strings.TrimSpace(status) != "99" {
c.Fatalf("expected exit 99, got %s", status)
}
case <-time.After(2 * time.Second):
+ waitCmd.Process.Kill()
c.Fatal("timeout waiting for `docker wait` to exit")
}
}
diff --git a/integration-cli/docker_test_vars.go b/integration-cli/docker_test_vars.go
index 9cb28b274e..ed394d26dd 100644
--- a/integration-cli/docker_test_vars.go
+++ b/integration-cli/docker_test_vars.go
@@ -18,7 +18,6 @@ var (
dockerBasePath = "/var/lib/docker"
volumesConfigPath = dockerBasePath + "/volumes"
- volumesStoragePath = dockerBasePath + "/vfs/dir"
containerStoragePath = dockerBasePath + "/containers"
runtimePath = "/var/run/docker"
diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go
index 8386bb59ff..e9c0df6ebc 100644
--- a/integration-cli/docker_utils.go
+++ b/integration-cli/docker_utils.go
@@ -37,6 +37,16 @@ type Daemon struct {
storageDriver string
execDriver string
wait chan error
+ userlandProxy bool
+}
+
+func enableUserlandProxy() bool {
+ if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
+ if val, err := strconv.ParseBool(env); err != nil {
+ return val
+ }
+ }
+ return true
}
// NewDaemon returns a Daemon instance to be used for testing.
@@ -48,7 +58,7 @@ func NewDaemon(c *check.C) *Daemon {
c.Fatal("Please set the DEST environment variable")
}
- dir := filepath.Join(dest, fmt.Sprintf("daemon%d", time.Now().UnixNano()%100000000))
+ dir := filepath.Join(dest, fmt.Sprintf("d%d", time.Now().UnixNano()%100000000))
daemonFolder, err := filepath.Abs(dir)
if err != nil {
c.Fatalf("Could not make %q an absolute path: %v", dir, err)
@@ -58,11 +68,19 @@ func NewDaemon(c *check.C) *Daemon {
c.Fatalf("Could not create %s/graph directory", daemonFolder)
}
+ userlandProxy := true
+ if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
+ if val, err := strconv.ParseBool(env); err != nil {
+ userlandProxy = val
+ }
+ }
+
return &Daemon{
c: c,
folder: daemonFolder,
storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"),
execDriver: os.Getenv("DOCKER_EXECDRIVER"),
+ userlandProxy: userlandProxy,
}
}
@@ -79,6 +97,7 @@ func (d *Daemon) Start(arg ...string) error {
"--daemon",
"--graph", fmt.Sprintf("%s/graph", d.folder),
"--pidfile", fmt.Sprintf("%s/docker.pid", d.folder),
+ fmt.Sprintf("--userland-proxy=%t", d.userlandProxy),
}
// If we don't explicitly set the log-level or debug flag(-D) then
@@ -570,8 +589,9 @@ func dockerCmdInDirWithTimeout(timeout time.Duration, path string, args ...strin
return out, status, err
}
-func findContainerIP(c *check.C, id string) string {
- cmd := exec.Command(dockerBinary, "inspect", "--format='{{ .NetworkSettings.IPAddress }}'", id)
+func findContainerIP(c *check.C, id string, vargs ...string) string {
+ args := append(vargs, "inspect", "--format='{{ .NetworkSettings.IPAddress }}'", id)
+ cmd := exec.Command(dockerBinary, args...)
out, _, err := runCommandWithOutput(cmd)
if err != nil {
c.Fatal(err, out)
@@ -580,6 +600,10 @@ func findContainerIP(c *check.C, id string) string {
return strings.Trim(out, " \r\n'")
}
+func (d *Daemon) findContainerIP(id string) string {
+ return findContainerIP(d.c, id, "--host", d.sock())
+}
+
func getContainerCount() (int, error) {
const containers = "Containers:"
@@ -663,7 +687,6 @@ func fakeContextAddDockerfile(ctx *FakeContext, dockerfile string) error {
func fakeContext(dockerfile string, files map[string]string) (*FakeContext, error) {
ctx, err := fakeContextWithFiles(files)
if err != nil {
- ctx.Close()
return nil, err
}
if err := fakeContextAddDockerfile(ctx, dockerfile); err != nil {
diff --git a/integration-cli/requirements.go b/integration-cli/requirements.go
index cc451bd886..fc4f5ee955 100644
--- a/integration-cli/requirements.go
+++ b/integration-cli/requirements.go
@@ -3,6 +3,7 @@ package main
import (
"encoding/json"
"fmt"
+ "io/ioutil"
"log"
"net/http"
"os/exec"
@@ -44,6 +45,13 @@ var (
},
"Test requires network availability, environment variable set to none to run in a non-network enabled mode.",
}
+ Apparmor = TestRequirement{
+ func() bool {
+ buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
+ return err == nil && len(buf) > 1 && buf[0] == 'Y'
+ },
+ "Test requires apparmor is enabled.",
+ }
RegistryHosting = TestRequirement{
func() bool {
// for now registry binary is built only if we're running inside
@@ -78,7 +86,6 @@ var (
},
"Test requires the native (libcontainer) exec driver.",
}
-
NotOverlay = TestRequirement{
func() bool {
cmd := exec.Command("grep", "^overlay / overlay", "/proc/mounts")
diff --git a/integration-cli/utils.go b/integration-cli/utils.go
index f0de79ea8f..f7ad61b0ac 100644
--- a/integration-cli/utils.go
+++ b/integration-cli/utils.go
@@ -1,6 +1,7 @@
package main
import (
+ "archive/tar"
"bytes"
"encoding/json"
"errors"
@@ -17,13 +18,12 @@ import (
"time"
"github.com/docker/docker/pkg/stringutils"
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
func getExitCode(err error) (int, error) {
exitCode := 0
if exiterr, ok := err.(*exec.ExitError); ok {
- if procExit := exiterr.Sys().(syscall.WaitStatus); ok {
+ if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
return procExit.ExitStatus(), nil
}
}
@@ -295,15 +295,6 @@ func fileServer(files map[string]string) (*FileServer, error) {
}, nil
}
-func copyWithCP(source, target string) error {
- copyCmd := exec.Command("cp", "-rp", source, target)
- out, exitCode, err := runCommandWithOutput(copyCmd)
- if err != nil || exitCode != 0 {
- return fmt.Errorf("failed to copy: error: %q ,output: %q", err, out)
- }
- return nil
-}
-
// randomUnixTmpDirPath provides a temporary unix path with rand string appended.
// does not create or checks if it exists.
func randomUnixTmpDirPath(s string) string {
diff --git a/integration/README.md b/integration/README.md
deleted file mode 100644
index 41f43a4ba7..0000000000
--- a/integration/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-## Legacy integration tests
-
-`./integration` contains Docker's legacy integration tests.
-It is DEPRECATED and will eventually be removed.
-
-### If you are a *CONTRIBUTOR* and want to add a test:
-
-* Consider mocking out side effects and contributing a *unit test* in the subsystem
-you're modifying. For example, the remote API has unit tests in `./api/server/server_unit_tests.go`.
-The events subsystem has unit tests in `./events/events_test.go`. And so on.
-
-* For end-to-end integration tests, please contribute to `./integration-cli`.
-
-
-### If you are a *MAINTAINER*
-
-Please don't allow patches adding new tests to `./integration`.
-
-### If you are *LOOKING FOR A WAY TO HELP*
-
-Please consider porting tests away from `./integration` and into either unit tests or CLI tests.
-
-Any help will be greatly appreciated!
diff --git a/integration/api_test.go b/integration/api_test.go
deleted file mode 100644
index e45fa97e82..0000000000
--- a/integration/api_test.go
+++ /dev/null
@@ -1,680 +0,0 @@
-package docker
-
-import (
- "bufio"
- "bytes"
- "encoding/json"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/http/httptest"
- "testing"
- "time"
-
- "github.com/docker/docker/api"
- "github.com/docker/docker/api/server"
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/engine"
- "github.com/docker/docker/runconfig"
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
-)
-
-func TestPostContainersKill(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- containerID := createTestContainer(eng,
- &runconfig.Config{
- Image: unitTestImageID,
- Cmd: runconfig.NewCommand("/bin/cat"),
- OpenStdin: true,
- },
- t,
- )
-
- startContainer(eng, containerID, t)
-
- // Give some time to the process to start
- containerWaitTimeout(eng, containerID, t)
-
- if !containerRunning(eng, containerID, t) {
- t.Errorf("Container should be running")
- }
-
- r := httptest.NewRecorder()
- req, err := http.NewRequest("POST", "/containers/"+containerID+"/kill", bytes.NewReader([]byte{}))
- if err != nil {
- t.Fatal(err)
- }
- server.ServeRequest(eng, api.APIVERSION, r, req)
- assertHttpNotError(r, t)
- if r.Code != http.StatusNoContent {
- t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
- }
- if containerRunning(eng, containerID, t) {
- t.Fatalf("The container hasn't been killed")
- }
-}
-
-func TestPostContainersRestart(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- containerID := createTestContainer(eng,
- &runconfig.Config{
- Image: unitTestImageID,
- Cmd: runconfig.NewCommand("/bin/top"),
- OpenStdin: true,
- },
- t,
- )
-
- startContainer(eng, containerID, t)
-
- // Give some time to the process to start
- containerWaitTimeout(eng, containerID, t)
-
- if !containerRunning(eng, containerID, t) {
- t.Errorf("Container should be running")
- }
-
- req, err := http.NewRequest("POST", "/containers/"+containerID+"/restart?t=1", bytes.NewReader([]byte{}))
- if err != nil {
- t.Fatal(err)
- }
- r := httptest.NewRecorder()
- server.ServeRequest(eng, api.APIVERSION, r, req)
- assertHttpNotError(r, t)
- if r.Code != http.StatusNoContent {
- t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
- }
-
- // Give some time to the process to restart
- containerWaitTimeout(eng, containerID, t)
-
- if !containerRunning(eng, containerID, t) {
- t.Fatalf("Container should be running")
- }
-
- containerKill(eng, containerID, t)
-}
-
-func TestPostContainersStart(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- containerID := createTestContainer(
- eng,
- &runconfig.Config{
- Image: unitTestImageID,
- Cmd: runconfig.NewCommand("/bin/cat"),
- OpenStdin: true,
- },
- t,
- )
-
- hostConfigJSON, err := json.Marshal(&runconfig.HostConfig{})
-
- req, err := http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON))
- if err != nil {
- t.Fatal(err)
- }
-
- req.Header.Set("Content-Type", "application/json")
-
- r := httptest.NewRecorder()
- server.ServeRequest(eng, api.APIVERSION, r, req)
- assertHttpNotError(r, t)
- if r.Code != http.StatusNoContent {
- t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
- }
-
- containerAssertExists(eng, containerID, t)
-
- req, err = http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON))
- if err != nil {
- t.Fatal(err)
- }
-
- req.Header.Set("Content-Type", "application/json")
-
- r = httptest.NewRecorder()
- server.ServeRequest(eng, api.APIVERSION, r, req)
-
- // Starting an already started container should return a 304
- assertHttpNotError(r, t)
- if r.Code != http.StatusNotModified {
- t.Fatalf("%d NOT MODIFIER expected, received %d\n", http.StatusNotModified, r.Code)
- }
- containerAssertExists(eng, containerID, t)
- containerKill(eng, containerID, t)
-}
-
-func TestPostContainersStop(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- containerID := createTestContainer(eng,
- &runconfig.Config{
- Image: unitTestImageID,
- Cmd: runconfig.NewCommand("/bin/top"),
- OpenStdin: true,
- },
- t,
- )
-
- startContainer(eng, containerID, t)
-
- // Give some time to the process to start
- containerWaitTimeout(eng, containerID, t)
-
- if !containerRunning(eng, containerID, t) {
- t.Errorf("Container should be running")
- }
-
- // Note: as it is a POST request, it requires a body.
- req, err := http.NewRequest("POST", "/containers/"+containerID+"/stop?t=1", bytes.NewReader([]byte{}))
- if err != nil {
- t.Fatal(err)
- }
- r := httptest.NewRecorder()
- server.ServeRequest(eng, api.APIVERSION, r, req)
- assertHttpNotError(r, t)
- if r.Code != http.StatusNoContent {
- t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
- }
- if containerRunning(eng, containerID, t) {
- t.Fatalf("The container hasn't been stopped")
- }
-
- req, err = http.NewRequest("POST", "/containers/"+containerID+"/stop?t=1", bytes.NewReader([]byte{}))
- if err != nil {
- t.Fatal(err)
- }
-
- r = httptest.NewRecorder()
- server.ServeRequest(eng, api.APIVERSION, r, req)
-
- // Stopping an already stopper container should return a 304
- assertHttpNotError(r, t)
- if r.Code != http.StatusNotModified {
- t.Fatalf("%d NOT MODIFIER expected, received %d\n", http.StatusNotModified, r.Code)
- }
-}
-
-func TestPostContainersWait(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- containerID := createTestContainer(eng,
- &runconfig.Config{
- Image: unitTestImageID,
- Cmd: runconfig.NewCommand("/bin/sleep", "1"),
- OpenStdin: true,
- },
- t,
- )
- startContainer(eng, containerID, t)
-
- setTimeout(t, "Wait timed out", 3*time.Second, func() {
- r := httptest.NewRecorder()
- req, err := http.NewRequest("POST", "/containers/"+containerID+"/wait", bytes.NewReader([]byte{}))
- if err != nil {
- t.Fatal(err)
- }
- server.ServeRequest(eng, api.APIVERSION, r, req)
- assertHttpNotError(r, t)
- var apiWait engine.Env
- if err := apiWait.Decode(r.Body); err != nil {
- t.Fatal(err)
- }
- if apiWait.GetInt("StatusCode") != 0 {
- t.Fatalf("Non zero exit code for sleep: %d\n", apiWait.GetInt("StatusCode"))
- }
- })
-
- if containerRunning(eng, containerID, t) {
- t.Fatalf("The container should be stopped after wait")
- }
-}
-
-func TestPostContainersAttach(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- containerID := createTestContainer(eng,
- &runconfig.Config{
- Image: unitTestImageID,
- Cmd: runconfig.NewCommand("/bin/cat"),
- OpenStdin: true,
- },
- t,
- )
- // Start the process
- startContainer(eng, containerID, t)
-
- stdin, stdinPipe := io.Pipe()
- stdout, stdoutPipe := io.Pipe()
-
- // Try to avoid the timeout in destroy. Best effort, don't check error
- defer func() {
- closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
- containerKill(eng, containerID, t)
- }()
-
- // Attach to it
- c1 := make(chan struct{})
- go func() {
- defer close(c1)
-
- r := &hijackTester{
- ResponseRecorder: httptest.NewRecorder(),
- in: stdin,
- out: stdoutPipe,
- }
-
- req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{}))
- if err != nil {
- t.Fatal(err)
- }
-
- server.ServeRequest(eng, api.APIVERSION, r, req)
- assertHttpNotError(r.ResponseRecorder, t)
- }()
-
- // Acknowledge hijack
- setTimeout(t, "hijack acknowledge timed out", 2*time.Second, func() {
- stdout.Read([]byte{})
- stdout.Read(make([]byte, 4096))
- })
-
- setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
- if err := assertPipe("hello\n", string([]byte{1, 0, 0, 0, 0, 0, 0, 6})+"hello", stdout, stdinPipe, 150); err != nil {
- t.Fatal(err)
- }
- })
-
- // Close pipes (client disconnects)
- if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
- t.Fatal(err)
- }
-
- // Wait for attach to finish, the client disconnected, therefore, Attach finished his job
- setTimeout(t, "Waiting for CmdAttach timed out", 10*time.Second, func() {
- <-c1
- })
-
- // We closed stdin, expect /bin/cat to still be running
- // Wait a little bit to make sure container.monitor() did his thing
- containerWaitTimeout(eng, containerID, t)
-
- // Try to avoid the timeout in destroy. Best effort, don't check error
- cStdin, _ := containerAttach(eng, containerID, t)
- cStdin.Close()
- containerWait(eng, containerID, t)
-}
-
-func TestPostContainersAttachStderr(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- containerID := createTestContainer(eng,
- &runconfig.Config{
- Image: unitTestImageID,
- Cmd: runconfig.NewCommand("/bin/sh", "-c", "/bin/cat >&2"),
- OpenStdin: true,
- },
- t,
- )
- // Start the process
- startContainer(eng, containerID, t)
-
- stdin, stdinPipe := io.Pipe()
- stdout, stdoutPipe := io.Pipe()
-
- // Try to avoid the timeout in destroy. Best effort, don't check error
- defer func() {
- closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
- containerKill(eng, containerID, t)
- }()
-
- // Attach to it
- c1 := make(chan struct{})
- go func() {
- defer close(c1)
-
- r := &hijackTester{
- ResponseRecorder: httptest.NewRecorder(),
- in: stdin,
- out: stdoutPipe,
- }
-
- req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{}))
- if err != nil {
- t.Fatal(err)
- }
-
- server.ServeRequest(eng, api.APIVERSION, r, req)
- assertHttpNotError(r.ResponseRecorder, t)
- }()
-
- // Acknowledge hijack
- setTimeout(t, "hijack acknowledge timed out", 2*time.Second, func() {
- stdout.Read([]byte{})
- stdout.Read(make([]byte, 4096))
- })
-
- setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
- if err := assertPipe("hello\n", string([]byte{2, 0, 0, 0, 0, 0, 0, 6})+"hello", stdout, stdinPipe, 150); err != nil {
- t.Fatal(err)
- }
- })
-
- // Close pipes (client disconnects)
- if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
- t.Fatal(err)
- }
-
- // Wait for attach to finish, the client disconnected, therefore, Attach finished his job
- setTimeout(t, "Waiting for CmdAttach timed out", 10*time.Second, func() {
- <-c1
- })
-
- // We closed stdin, expect /bin/cat to still be running
- // Wait a little bit to make sure container.monitor() did his thing
- containerWaitTimeout(eng, containerID, t)
-
- // Try to avoid the timeout in destroy. Best effort, don't check error
- cStdin, _ := containerAttach(eng, containerID, t)
- cStdin.Close()
- containerWait(eng, containerID, t)
-}
-
-func TestOptionsRoute(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- r := httptest.NewRecorder()
- req, err := http.NewRequest("OPTIONS", "/", nil)
- if err != nil {
- t.Fatal(err)
- }
- server.ServeRequest(eng, api.APIVERSION, r, req)
- assertHttpNotError(r, t)
- if r.Code != http.StatusOK {
- t.Errorf("Expected response for OPTIONS request to be \"200\", %v found.", r.Code)
- }
-}
-
-func TestGetEnabledCors(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- r := httptest.NewRecorder()
-
- req, err := http.NewRequest("GET", "/version", nil)
- if err != nil {
- t.Fatal(err)
- }
- server.ServeRequest(eng, api.APIVERSION, r, req)
- assertHttpNotError(r, t)
- if r.Code != http.StatusOK {
- t.Errorf("Expected response for OPTIONS request to be \"200\", %v found.", r.Code)
- }
-
- allowOrigin := r.Header().Get("Access-Control-Allow-Origin")
- allowHeaders := r.Header().Get("Access-Control-Allow-Headers")
- allowMethods := r.Header().Get("Access-Control-Allow-Methods")
-
- if allowOrigin != "*" {
- t.Errorf("Expected header Access-Control-Allow-Origin to be \"*\", %s found.", allowOrigin)
- }
- if allowHeaders != "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth" {
- t.Errorf("Expected header Access-Control-Allow-Headers to be \"Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth\", %s found.", allowHeaders)
- }
- if allowMethods != "GET, POST, DELETE, PUT, OPTIONS" {
- t.Errorf("Expected header Access-Control-Allow-Methods to be \"GET, POST, DELETE, PUT, OPTIONS\", %s found.", allowMethods)
- }
-}
-
-func TestDeleteImages(t *testing.T) {
- eng := NewTestEngine(t)
- //we expect errors, so we disable stderr
- eng.Stderr = ioutil.Discard
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- initialImages := getImages(eng, t, true, "")
-
- d := getDaemon(eng)
- if err := d.Repositories().Tag("test", "test", unitTestImageName, true); err != nil {
- t.Fatal(err)
- }
-
- images := getImages(eng, t, true, "")
-
- if len(images[0].RepoTags) != len(initialImages[0].RepoTags)+1 {
- t.Errorf("Expected %d images, %d found", len(initialImages[0].RepoTags)+1, len(images[0].RepoTags))
- }
-
- req, err := http.NewRequest("DELETE", "/images/"+unitTestImageID, nil)
- if err != nil {
- t.Fatal(err)
- }
-
- r := httptest.NewRecorder()
- server.ServeRequest(eng, api.APIVERSION, r, req)
- if r.Code != http.StatusConflict {
- t.Fatalf("Expected http status 409-conflict, got %v", r.Code)
- }
-
- req2, err := http.NewRequest("DELETE", "/images/test:test", nil)
- if err != nil {
- t.Fatal(err)
- }
-
- r2 := httptest.NewRecorder()
- server.ServeRequest(eng, api.APIVERSION, r2, req2)
- assertHttpNotError(r2, t)
- if r2.Code != http.StatusOK {
- t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
- }
-
- delImages := []types.ImageDelete{}
- err = json.Unmarshal(r2.Body.Bytes(), &delImages)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(delImages) != 1 {
- t.Fatalf("Expected %d event (untagged), got %d", 1, len(delImages))
- }
- images = getImages(eng, t, false, "")
-
- if len(images) != len(initialImages) {
- t.Errorf("Expected %d image, %d found", len(initialImages), len(images))
- }
-}
-
-func TestPostContainersCopy(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- // Create a container and remove a file
- containerID := createTestContainer(eng,
- &runconfig.Config{
- Image: unitTestImageID,
- Cmd: runconfig.NewCommand("touch", "/test.txt"),
- },
- t,
- )
- containerRun(eng, containerID, t)
-
- r := httptest.NewRecorder()
-
- var copyData engine.Env
- copyData.Set("Resource", "/test.txt")
- copyData.Set("HostPath", ".")
-
- jsonData := bytes.NewBuffer(nil)
- if err := copyData.Encode(jsonData); err != nil {
- t.Fatal(err)
- }
-
- req, err := http.NewRequest("POST", "/containers/"+containerID+"/copy", jsonData)
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Add("Content-Type", "application/json")
- server.ServeRequest(eng, api.APIVERSION, r, req)
- assertHttpNotError(r, t)
-
- if r.Code != http.StatusOK {
- t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
- }
-
- found := false
- for tarReader := tar.NewReader(r.Body); ; {
- h, err := tarReader.Next()
- if err != nil {
- if err == io.EOF {
- break
- }
- t.Fatal(err)
- }
- if h.Name == "test.txt" {
- found = true
- break
- }
- }
- if !found {
- t.Fatalf("The created test file has not been found in the copied output")
- }
-}
-
-func TestPostContainersCopyWhenContainerNotFound(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- r := httptest.NewRecorder()
-
- var copyData engine.Env
- copyData.Set("Resource", "/test.txt")
- copyData.Set("HostPath", ".")
-
- jsonData := bytes.NewBuffer(nil)
- if err := copyData.Encode(jsonData); err != nil {
- t.Fatal(err)
- }
-
- req, err := http.NewRequest("POST", "/containers/id_not_found/copy", jsonData)
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Add("Content-Type", "application/json")
- server.ServeRequest(eng, api.APIVERSION, r, req)
- if r.Code != http.StatusNotFound {
- t.Fatalf("404 expected for id_not_found Container, received %v", r.Code)
- }
-}
-
-// Regression test for https://github.com/docker/docker/issues/6231
-func TestConstainersStartChunkedEncodingHostConfig(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- r := httptest.NewRecorder()
-
- var testData engine.Env
- testData.Set("Image", "docker-test-image")
- testData.SetAuto("Volumes", map[string]struct{}{"/foo": {}})
- testData.Set("Cmd", "true")
- jsonData := bytes.NewBuffer(nil)
- if err := testData.Encode(jsonData); err != nil {
- t.Fatal(err)
- }
-
- req, err := http.NewRequest("POST", "/containers/create?name=chunk_test", jsonData)
- if err != nil {
- t.Fatal(err)
- }
-
- req.Header.Add("Content-Type", "application/json")
- server.ServeRequest(eng, api.APIVERSION, r, req)
- assertHttpNotError(r, t)
-
- var testData2 engine.Env
- testData2.SetAuto("Binds", []string{"/tmp:/foo"})
- jsonData = bytes.NewBuffer(nil)
- if err := testData2.Encode(jsonData); err != nil {
- t.Fatal(err)
- }
-
- req, err = http.NewRequest("POST", "/containers/chunk_test/start", jsonData)
- if err != nil {
- t.Fatal(err)
- }
-
- req.Header.Add("Content-Type", "application/json")
- // This is a cheat to make the http request do chunked encoding
- // Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite
- // https://golang.org/src/pkg/net/http/request.go?s=11980:12172
- req.ContentLength = -1
- server.ServeRequest(eng, api.APIVERSION, r, req)
- assertHttpNotError(r, t)
-
- type config struct {
- HostConfig struct {
- Binds []string
- }
- }
-
- req, err = http.NewRequest("GET", "/containers/chunk_test/json", nil)
- if err != nil {
- t.Fatal(err)
- }
-
- r2 := httptest.NewRecorder()
- req.Header.Add("Content-Type", "application/json")
- server.ServeRequest(eng, api.APIVERSION, r2, req)
- assertHttpNotError(r, t)
-
- c := config{}
-
- json.Unmarshal(r2.Body.Bytes(), &c)
-
- if len(c.HostConfig.Binds) == 0 {
- t.Fatal("Chunked Encoding not handled")
- }
-
- if c.HostConfig.Binds[0] != "/tmp:/foo" {
- t.Fatal("Chunked encoding not properly handled, expected binds to be /tmp:/foo, got:", c.HostConfig.Binds[0])
- }
-}
-
-// Mocked types for tests
-type NopConn struct {
- io.ReadCloser
- io.Writer
-}
-
-func (c *NopConn) LocalAddr() net.Addr { return nil }
-func (c *NopConn) RemoteAddr() net.Addr { return nil }
-func (c *NopConn) SetDeadline(t time.Time) error { return nil }
-func (c *NopConn) SetReadDeadline(t time.Time) error { return nil }
-func (c *NopConn) SetWriteDeadline(t time.Time) error { return nil }
-
-type hijackTester struct {
- *httptest.ResponseRecorder
- in io.ReadCloser
- out io.Writer
-}
-
-func (t *hijackTester) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- bufrw := bufio.NewReadWriter(bufio.NewReader(t.in), bufio.NewWriter(t.out))
- conn := &NopConn{
- ReadCloser: t.in,
- Writer: t.out,
- }
- return conn, bufrw, nil
-}
diff --git a/integration/container_test.go b/integration/container_test.go
deleted file mode 100644
index 9256e9997f..0000000000
--- a/integration/container_test.go
+++ /dev/null
@@ -1,235 +0,0 @@
-package docker
-
-import (
- "io"
- "io/ioutil"
- "testing"
- "time"
-
- "github.com/docker/docker/runconfig"
-)
-
-func TestRestartStdin(t *testing.T) {
- daemon := mkDaemon(t)
- defer nuke(daemon)
- container, _, err := daemon.Create(&runconfig.Config{
- Image: GetTestImage(daemon).ID,
- Cmd: runconfig.NewCommand("cat"),
-
- OpenStdin: true,
- },
- &runconfig.HostConfig{},
- "",
- )
- if err != nil {
- t.Fatal(err)
- }
- defer daemon.Rm(container)
-
- stdin := container.StdinPipe()
- stdout := container.StdoutPipe()
- if err := container.Start(); err != nil {
- t.Fatal(err)
- }
- if _, err := io.WriteString(stdin, "hello world"); err != nil {
- t.Fatal(err)
- }
- if err := stdin.Close(); err != nil {
- t.Fatal(err)
- }
- container.WaitStop(-1 * time.Second)
- output, err := ioutil.ReadAll(stdout)
- if err != nil {
- t.Fatal(err)
- }
- if err := stdout.Close(); err != nil {
- t.Fatal(err)
- }
- if string(output) != "hello world" {
- t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output))
- }
-
- // Restart and try again
- stdin = container.StdinPipe()
- stdout = container.StdoutPipe()
- if err := container.Start(); err != nil {
- t.Fatal(err)
- }
- if _, err := io.WriteString(stdin, "hello world #2"); err != nil {
- t.Fatal(err)
- }
- if err := stdin.Close(); err != nil {
- t.Fatal(err)
- }
- container.WaitStop(-1 * time.Second)
- output, err = ioutil.ReadAll(stdout)
- if err != nil {
- t.Fatal(err)
- }
- if err := stdout.Close(); err != nil {
- t.Fatal(err)
- }
- if string(output) != "hello world #2" {
- t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world #2", string(output))
- }
-}
-
-func TestStdin(t *testing.T) {
- daemon := mkDaemon(t)
- defer nuke(daemon)
- container, _, err := daemon.Create(&runconfig.Config{
- Image: GetTestImage(daemon).ID,
- Cmd: runconfig.NewCommand("cat"),
-
- OpenStdin: true,
- },
- &runconfig.HostConfig{},
- "",
- )
- if err != nil {
- t.Fatal(err)
- }
- defer daemon.Rm(container)
-
- stdin := container.StdinPipe()
- stdout := container.StdoutPipe()
- if err := container.Start(); err != nil {
- t.Fatal(err)
- }
- defer stdin.Close()
- defer stdout.Close()
- if _, err := io.WriteString(stdin, "hello world"); err != nil {
- t.Fatal(err)
- }
- if err := stdin.Close(); err != nil {
- t.Fatal(err)
- }
- container.WaitStop(-1 * time.Second)
- output, err := ioutil.ReadAll(stdout)
- if err != nil {
- t.Fatal(err)
- }
- if string(output) != "hello world" {
- t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output))
- }
-}
-
-func TestTty(t *testing.T) {
- daemon := mkDaemon(t)
- defer nuke(daemon)
- container, _, err := daemon.Create(&runconfig.Config{
- Image: GetTestImage(daemon).ID,
- Cmd: runconfig.NewCommand("cat"),
-
- OpenStdin: true,
- },
- &runconfig.HostConfig{},
- "",
- )
- if err != nil {
- t.Fatal(err)
- }
- defer daemon.Rm(container)
-
- stdin := container.StdinPipe()
- stdout := container.StdoutPipe()
- if err := container.Start(); err != nil {
- t.Fatal(err)
- }
- defer stdin.Close()
- defer stdout.Close()
- if _, err := io.WriteString(stdin, "hello world"); err != nil {
- t.Fatal(err)
- }
- if err := stdin.Close(); err != nil {
- t.Fatal(err)
- }
- container.WaitStop(-1 * time.Second)
- output, err := ioutil.ReadAll(stdout)
- if err != nil {
- t.Fatal(err)
- }
- if string(output) != "hello world" {
- t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output))
- }
-}
-
-func BenchmarkRunSequential(b *testing.B) {
- daemon := mkDaemon(b)
- defer nuke(daemon)
- for i := 0; i < b.N; i++ {
- container, _, err := daemon.Create(&runconfig.Config{
- Image: GetTestImage(daemon).ID,
- Cmd: runconfig.NewCommand("echo", "-n", "foo"),
- },
- &runconfig.HostConfig{},
- "",
- )
- if err != nil {
- b.Fatal(err)
- }
- defer daemon.Rm(container)
- output, err := container.Output()
- if err != nil {
- b.Fatal(err)
- }
- if string(output) != "foo" {
- b.Fatalf("Unexpected output: %s", output)
- }
- if err := daemon.Rm(container); err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func BenchmarkRunParallel(b *testing.B) {
- daemon := mkDaemon(b)
- defer nuke(daemon)
-
- var tasks []chan error
-
- for i := 0; i < b.N; i++ {
- complete := make(chan error)
- tasks = append(tasks, complete)
- go func(i int, complete chan error) {
- container, _, err := daemon.Create(&runconfig.Config{
- Image: GetTestImage(daemon).ID,
- Cmd: runconfig.NewCommand("echo", "-n", "foo"),
- },
- &runconfig.HostConfig{},
- "",
- )
- if err != nil {
- complete <- err
- return
- }
- defer daemon.Rm(container)
- if err := container.Start(); err != nil {
- complete <- err
- return
- }
- if _, err := container.WaitStop(15 * time.Second); err != nil {
- complete <- err
- return
- }
- // if string(output) != "foo" {
- // complete <- fmt.Errorf("Unexpected output: %v", string(output))
- // }
- if err := daemon.Rm(container); err != nil {
- complete <- err
- return
- }
- complete <- nil
- }(i, complete)
- }
- var errors []error
- for _, task := range tasks {
- err := <-task
- if err != nil {
- errors = append(errors, err)
- }
- }
- if len(errors) > 0 {
- b.Fatal(errors)
- }
-}
diff --git a/integration/runtime_test.go b/integration/runtime_test.go
deleted file mode 100644
index a2f22072c3..0000000000
--- a/integration/runtime_test.go
+++ /dev/null
@@ -1,847 +0,0 @@
-package docker
-
-import (
- "bytes"
- "fmt"
- "io"
- std_log "log"
- "net"
- "net/url"
- "os"
- "path/filepath"
- "runtime"
- "strconv"
- "strings"
- "syscall"
- "testing"
- "time"
-
- "github.com/Sirupsen/logrus"
- apiserver "github.com/docker/docker/api/server"
- "github.com/docker/docker/cliconfig"
- "github.com/docker/docker/daemon"
- "github.com/docker/docker/daemon/execdriver"
- "github.com/docker/docker/engine"
- "github.com/docker/docker/graph"
- "github.com/docker/docker/image"
- "github.com/docker/docker/nat"
- "github.com/docker/docker/pkg/fileutils"
- "github.com/docker/docker/pkg/ioutils"
- "github.com/docker/docker/pkg/reexec"
- "github.com/docker/docker/pkg/stringid"
- "github.com/docker/docker/runconfig"
- "github.com/docker/docker/utils"
-)
-
-const (
- unitTestImageName = "docker-test-image"
- unitTestImageID = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
- unitTestImageIDShort = "83599e29c455"
- unitTestNetworkBridge = "testdockbr0"
- unitTestStoreBase = "/var/lib/docker/unit-tests"
- unitTestDockerTmpdir = "/var/lib/docker/tmp"
- testDaemonAddr = "127.0.0.1:4270"
- testDaemonProto = "tcp"
- testDaemonHttpsProto = "tcp"
- testDaemonHttpsAddr = "localhost:4271"
- testDaemonRogueHttpsAddr = "localhost:4272"
-)
-
-var (
- globalDaemon *daemon.Daemon
- globalHttpsEngine *engine.Engine
- globalRogueHttpsEngine *engine.Engine
- startFds int
- startGoroutines int
-)
-
-// FIXME: nuke() is deprecated by Daemon.Nuke()
-func nuke(daemon *daemon.Daemon) error {
- return daemon.Nuke()
-}
-
-// FIXME: cleanup and nuke are redundant.
-func cleanup(eng *engine.Engine, t *testing.T) error {
- daemon := mkDaemonFromEngine(eng, t)
- for _, container := range daemon.List() {
- container.Kill()
- daemon.Rm(container)
- }
- images, err := daemon.Repositories().Images(&graph.ImagesConfig{})
- if err != nil {
- t.Fatal(err)
- }
- for _, image := range images {
- if image.ID != unitTestImageID {
- eng.Job("image_delete", image.ID).Run()
- }
- }
- return nil
-}
-
-func init() {
- // Always use the same driver (vfs) for all integration tests.
- // To test other drivers, we need a dedicated driver validation suite.
- os.Setenv("DOCKER_DRIVER", "vfs")
- os.Setenv("TEST", "1")
- os.Setenv("DOCKER_TMPDIR", unitTestDockerTmpdir)
-
- // Hack to run sys init during unit testing
- if reexec.Init() {
- return
- }
-
- if uid := syscall.Geteuid(); uid != 0 {
- logrus.Fatalf("docker tests need to be run as root")
- }
-
- // Copy dockerinit into our current testing directory, if provided (so we can test a separate dockerinit binary)
- if dockerinit := os.Getenv("TEST_DOCKERINIT_PATH"); dockerinit != "" {
- src, err := os.Open(dockerinit)
- if err != nil {
- logrus.Fatalf("Unable to open TEST_DOCKERINIT_PATH: %s", err)
- }
- defer src.Close()
- dst, err := os.OpenFile(filepath.Join(filepath.Dir(utils.SelfPath()), "dockerinit"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0555)
- if err != nil {
- logrus.Fatalf("Unable to create dockerinit in test directory: %s", err)
- }
- defer dst.Close()
- if _, err := io.Copy(dst, src); err != nil {
- logrus.Fatalf("Unable to copy dockerinit to TEST_DOCKERINIT_PATH: %s", err)
- }
- dst.Close()
- src.Close()
- }
-
- // Setup the base daemon, which will be duplicated for each test.
- // (no tests are run directly in the base)
- setupBaseImage()
-
- // Create the "global daemon" with a long-running daemons for integration tests
- spawnGlobalDaemon()
- startFds, startGoroutines = fileutils.GetTotalUsedFds(), runtime.NumGoroutine()
-}
-
-func setupBaseImage() {
- eng := newTestEngine(std_log.New(os.Stderr, "", 0), false, unitTestStoreBase)
- d := getDaemon(eng)
-
- _, err := d.Repositories().Lookup(unitTestImageName)
- // If the unit test is not found, try to download it.
- if err != nil {
- // seems like we can just ignore the error here...
- // there was a check of imgId from job stdout against unittestid but
- // if there was an error how could the imgid from the job
- // be compared?! it's obvious it's different, am I totally wrong?
-
- // Retrieve the Image
- imagePullConfig := &graph.ImagePullConfig{
- Parallel: true,
- OutStream: ioutils.NopWriteCloser(os.Stdout),
- AuthConfig: &cliconfig.AuthConfig{},
- }
- if err := d.Repositories().Pull(unitTestImageName, "", imagePullConfig); err != nil {
- logrus.Fatalf("Unable to pull the test image: %s", err)
- }
- }
-}
-
-func spawnGlobalDaemon() {
- if globalDaemon != nil {
- logrus.Debugf("Global daemon already exists. Skipping.")
- return
- }
- t := std_log.New(os.Stderr, "", 0)
- eng := NewTestEngine(t)
- globalDaemon = mkDaemonFromEngine(eng, t)
-
- serverConfig := &apiserver.ServerConfig{Logging: true}
- api := apiserver.New(serverConfig, eng)
- // Spawn a Daemon
- go func() {
- logrus.Debugf("Spawning global daemon for integration tests")
- listenURL := &url.URL{
- Scheme: testDaemonProto,
- Host: testDaemonAddr,
- }
-
- if err := api.ServeApi([]string{listenURL.String()}); err != nil {
- logrus.Fatalf("Unable to spawn the test daemon: %s", err)
- }
- }()
-
- // Give some time to ListenAndServer to actually start
- // FIXME: use inmem transports instead of tcp
- time.Sleep(time.Second)
-
- api.AcceptConnections(getDaemon(eng))
-}
-
-// FIXME: test that ImagePull(json=true) send correct json output
-
-func GetTestImage(daemon *daemon.Daemon) *image.Image {
- imgs, err := daemon.Graph().Map()
- if err != nil {
- logrus.Fatalf("Unable to get the test image: %s", err)
- }
- for _, image := range imgs {
- if image.ID == unitTestImageID {
- return image
- }
- }
- logrus.Fatalf("Test image %v not found in %s: %s", unitTestImageID, daemon.Graph().Root, imgs)
- return nil
-}
-
-func TestDaemonCreate(t *testing.T) {
- daemon := mkDaemon(t)
- defer nuke(daemon)
-
- // Make sure we start we 0 containers
- if len(daemon.List()) != 0 {
- t.Errorf("Expected 0 containers, %v found", len(daemon.List()))
- }
-
- container, _, err := daemon.Create(&runconfig.Config{
- Image: GetTestImage(daemon).ID,
- Cmd: runconfig.NewCommand("ls", "-al"),
- },
- &runconfig.HostConfig{},
- "",
- )
- if err != nil {
- t.Fatal(err)
- }
-
- defer func() {
- if err := daemon.Rm(container); err != nil {
- t.Error(err)
- }
- }()
-
- // Make sure we can find the newly created container with List()
- if len(daemon.List()) != 1 {
- t.Errorf("Expected 1 container, %v found", len(daemon.List()))
- }
-
- // Make sure the container List() returns is the right one
- if daemon.List()[0].ID != container.ID {
- t.Errorf("Unexpected container %v returned by List", daemon.List()[0])
- }
-
- // Make sure we can get the container with Get()
- if _, err := daemon.Get(container.ID); err != nil {
- t.Errorf("Unable to get newly created container")
- }
-
- // Make sure it is the right container
- if c, _ := daemon.Get(container.ID); c != container {
- t.Errorf("Get() returned the wrong container")
- }
-
- // Make sure Exists returns it as existing
- if !daemon.Exists(container.ID) {
- t.Errorf("Exists() returned false for a newly created container")
- }
-
- // Test that conflict error displays correct details
- cmd := runconfig.NewCommand("ls", "-al")
- testContainer, _, _ := daemon.Create(
- &runconfig.Config{
- Image: GetTestImage(daemon).ID,
- Cmd: cmd,
- },
- &runconfig.HostConfig{},
- "conflictname",
- )
- if _, _, err := daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID, Cmd: cmd}, &runconfig.HostConfig{}, testContainer.Name); err == nil || !strings.Contains(err.Error(), stringid.TruncateID(testContainer.ID)) {
- t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %v", err)
- }
-
- // Make sure create with bad parameters returns an error
- if _, _, err = daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID}, &runconfig.HostConfig{}, ""); err == nil {
- t.Fatal("Builder.Create should throw an error when Cmd is missing")
- }
-
- if _, _, err := daemon.Create(
- &runconfig.Config{
- Image: GetTestImage(daemon).ID,
- Cmd: runconfig.NewCommand(),
- },
- &runconfig.HostConfig{},
- "",
- ); err == nil {
- t.Fatal("Builder.Create should throw an error when Cmd is empty")
- }
-
- config := &runconfig.Config{
- Image: GetTestImage(daemon).ID,
- Cmd: runconfig.NewCommand("/bin/ls"),
- PortSpecs: []string{"80"},
- }
- container, _, err = daemon.Create(config, &runconfig.HostConfig{}, "")
-
- _, err = daemon.Commit(container, "testrepo", "testtag", "", "", true, config)
- if err != nil {
- t.Error(err)
- }
-
- // test expose 80:8000
- container, warnings, err := daemon.Create(&runconfig.Config{
- Image: GetTestImage(daemon).ID,
- Cmd: runconfig.NewCommand("ls", "-al"),
- PortSpecs: []string{"80:8000"},
- },
- &runconfig.HostConfig{},
- "",
- )
- if err != nil {
- t.Fatal(err)
- }
- if warnings == nil || len(warnings) != 1 {
- t.Error("Expected a warning, got none")
- }
-}
-
-func TestDestroy(t *testing.T) {
- daemon := mkDaemon(t)
- defer nuke(daemon)
-
- container, _, err := daemon.Create(&runconfig.Config{
- Image: GetTestImage(daemon).ID,
- Cmd: runconfig.NewCommand("ls", "-al"),
- },
- &runconfig.HostConfig{},
- "")
- if err != nil {
- t.Fatal(err)
- }
- // Destroy
- if err := daemon.Rm(container); err != nil {
- t.Error(err)
- }
-
- // Make sure daemon.Exists() behaves correctly
- if daemon.Exists("test_destroy") {
- t.Errorf("Exists() returned true")
- }
-
- // Make sure daemon.List() doesn't list the destroyed container
- if len(daemon.List()) != 0 {
- t.Errorf("Expected 0 container, %v found", len(daemon.List()))
- }
-
- // Make sure daemon.Get() refuses to return the unexisting container
- if c, _ := daemon.Get(container.ID); c != nil {
- t.Errorf("Got a container that should not exist")
- }
-
- // Test double destroy
- if err := daemon.Rm(container); err == nil {
- // It should have failed
- t.Errorf("Double destroy did not fail")
- }
-}
-
-func TestGet(t *testing.T) {
- daemon := mkDaemon(t)
- defer nuke(daemon)
-
- container1, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t)
- defer daemon.Rm(container1)
-
- container2, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t)
- defer daemon.Rm(container2)
-
- container3, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t)
- defer daemon.Rm(container3)
-
- if c, _ := daemon.Get(container1.ID); c != container1 {
- t.Errorf("Get(test1) returned %v while expecting %v", c, container1)
- }
-
- if c, _ := daemon.Get(container2.ID); c != container2 {
- t.Errorf("Get(test2) returned %v while expecting %v", c, container2)
- }
-
- if c, _ := daemon.Get(container3.ID); c != container3 {
- t.Errorf("Get(test3) returned %v while expecting %v", c, container3)
- }
-
-}
-
-func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daemon.Container, string) {
- var (
- err error
- id string
- strPort string
- eng = NewTestEngine(t)
- daemon = mkDaemonFromEngine(eng, t)
- port = 5554
- p nat.Port
- )
- defer func() {
- if err != nil {
- daemon.Nuke()
- }
- }()
-
- for {
- port += 1
- strPort = strconv.Itoa(port)
- var cmd string
- if proto == "tcp" {
- cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat"
- } else if proto == "udp" {
- cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat"
- } else {
- t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
- }
- ep := make(map[nat.Port]struct{}, 1)
- p = nat.Port(fmt.Sprintf("%s/%s", strPort, proto))
- ep[p] = struct{}{}
-
- c := &runconfig.Config{
- Image: unitTestImageID,
- Cmd: runconfig.NewCommand("sh", "-c", cmd),
- PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)},
- ExposedPorts: ep,
- }
-
- id, _, err = daemon.ContainerCreate(unitTestImageID, c, &runconfig.HostConfig{})
- // FIXME: this relies on the undocumented behavior of daemon.Create
- // which will return a nil error AND container if the exposed ports
- // are invalid. That behavior should be fixed!
- if id != "" {
- break
- }
- t.Logf("Port %v already in use, trying another one", strPort)
-
- }
-
- if err := daemon.ContainerStart(id, &runconfig.HostConfig{}); err != nil {
- t.Fatal(err)
- }
-
- container, err := daemon.Get(id)
- if err != nil {
- t.Fatal(err)
- }
-
- setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
- for !container.IsRunning() {
- time.Sleep(10 * time.Millisecond)
- }
- })
-
- // Even if the state is running, lets give some time to lxc to spawn the process
- container.WaitStop(500 * time.Millisecond)
-
- strPort = container.NetworkSettings.Ports[p][0].HostPort
- return daemon, container, strPort
-}
-
-// Run a container with a TCP port allocated, and test that it can receive connections on localhost
-func TestAllocateTCPPortLocalhost(t *testing.T) {
- daemon, container, port := startEchoServerContainer(t, "tcp")
- defer nuke(daemon)
- defer container.Kill()
-
- for i := 0; i != 10; i++ {
- conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port))
- if err != nil {
- t.Fatal(err)
- }
- defer conn.Close()
-
- input := bytes.NewBufferString("well hello there\n")
- _, err = conn.Write(input.Bytes())
- if err != nil {
- t.Fatal(err)
- }
- buf := make([]byte, 16)
- read := 0
- conn.SetReadDeadline(time.Now().Add(3 * time.Second))
- read, err = conn.Read(buf)
- if err != nil {
- if err, ok := err.(*net.OpError); ok {
- if err.Err == syscall.ECONNRESET {
- t.Logf("Connection reset by the proxy, socat is probably not listening yet, trying again in a sec")
- conn.Close()
- time.Sleep(time.Second)
- continue
- }
- if err.Timeout() {
- t.Log("Timeout, trying again")
- conn.Close()
- continue
- }
- }
- t.Fatal(err)
- }
- output := string(buf[:read])
- if !strings.Contains(output, "well hello there") {
- t.Fatal(fmt.Errorf("[%v] doesn't contain [well hello there]", output))
- } else {
- return
- }
- }
-
- t.Fatal("No reply from the container")
-}
-
-// Run a container with an UDP port allocated, and test that it can receive connections on localhost
-func TestAllocateUDPPortLocalhost(t *testing.T) {
- daemon, container, port := startEchoServerContainer(t, "udp")
- defer nuke(daemon)
- defer container.Kill()
-
- conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port))
- if err != nil {
- t.Fatal(err)
- }
- defer conn.Close()
-
- input := bytes.NewBufferString("well hello there\n")
- buf := make([]byte, 16)
- // Try for a minute, for some reason the select in socat may take ages
- // to return even though everything on the path seems fine (i.e: the
- // UDPProxy forwards the traffic correctly and you can see the packets
- // on the interface from within the container).
- for i := 0; i != 120; i++ {
- _, err := conn.Write(input.Bytes())
- if err != nil {
- t.Fatal(err)
- }
- conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond))
- read, err := conn.Read(buf)
- if err == nil {
- output := string(buf[:read])
- if strings.Contains(output, "well hello there") {
- return
- }
- }
- }
-
- t.Fatal("No reply from the container")
-}
-
-func TestRestore(t *testing.T) {
- eng := NewTestEngine(t)
- daemon1 := mkDaemonFromEngine(eng, t)
- defer daemon1.Nuke()
- // Create a container with one instance of docker
- container1, _, _ := mkContainer(daemon1, []string{"_", "ls", "-al"}, t)
- defer daemon1.Rm(container1)
-
- // Create a second container meant to be killed
- container2, _, _ := mkContainer(daemon1, []string{"-i", "_", "/bin/cat"}, t)
- defer daemon1.Rm(container2)
-
- // Start the container non blocking
- if err := container2.Start(); err != nil {
- t.Fatal(err)
- }
-
- if !container2.IsRunning() {
- t.Fatalf("Container %v should appear as running but isn't", container2.ID)
- }
-
- // Simulate a crash/manual quit of dockerd: process dies, states stays 'Running'
- cStdin := container2.StdinPipe()
- cStdin.Close()
- if _, err := container2.WaitStop(2 * time.Second); err != nil {
- t.Fatal(err)
- }
- container2.SetRunning(42)
- container2.ToDisk()
-
- if len(daemon1.List()) != 2 {
- t.Errorf("Expected 2 container, %v found", len(daemon1.List()))
- }
- if err := container1.Run(); err != nil {
- t.Fatal(err)
- }
-
- if !container2.IsRunning() {
- t.Fatalf("Container %v should appear as running but isn't", container2.ID)
- }
-
- // Here are are simulating a docker restart - that is, reloading all containers
- // from scratch
- eng = newTestEngine(t, false, daemon1.Config().Root)
- daemon2 := mkDaemonFromEngine(eng, t)
- if len(daemon2.List()) != 2 {
- t.Errorf("Expected 2 container, %v found", len(daemon2.List()))
- }
- runningCount := 0
- for _, c := range daemon2.List() {
- if c.IsRunning() {
- t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
- runningCount++
- }
- }
- if runningCount != 0 {
- t.Fatalf("Expected 0 container alive, %d found", runningCount)
- }
- container3, err := daemon2.Get(container1.ID)
- if err != nil {
- t.Fatal("Unable to Get container")
- }
- if err := container3.Run(); err != nil {
- t.Fatal(err)
- }
- container2.SetStopped(&execdriver.ExitStatus{ExitCode: 0})
-}
-
-func TestDefaultContainerName(t *testing.T) {
- eng := NewTestEngine(t)
- daemon := mkDaemonFromEngine(eng, t)
- defer nuke(daemon)
-
- config, _, _, err := parseRun([]string{unitTestImageID, "echo test"})
- if err != nil {
- t.Fatal(err)
- }
-
- container, err := daemon.Get(createNamedTestContainer(eng, config, t, "some_name"))
- if err != nil {
- t.Fatal(err)
- }
- containerID := container.ID
-
- if container.Name != "/some_name" {
- t.Fatalf("Expect /some_name got %s", container.Name)
- }
-
- c, err := daemon.Get("/some_name")
- if err != nil {
- t.Fatalf("Couldn't retrieve test container as /some_name")
- }
- if c.ID != containerID {
- t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID)
- }
-}
-
-func TestRandomContainerName(t *testing.T) {
- eng := NewTestEngine(t)
- daemon := mkDaemonFromEngine(eng, t)
- defer nuke(daemon)
-
- config, _, _, err := parseRun([]string{GetTestImage(daemon).ID, "echo test"})
- if err != nil {
- t.Fatal(err)
- }
-
- container, err := daemon.Get(createTestContainer(eng, config, t))
- if err != nil {
- t.Fatal(err)
- }
- containerID := container.ID
-
- if container.Name == "" {
- t.Fatalf("Expected not empty container name")
- }
-
- if c, err := daemon.Get(container.Name); err != nil {
- logrus.Fatalf("Could not lookup container %s by its name", container.Name)
- } else if c.ID != containerID {
- logrus.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID)
- }
-}
-
-func TestContainerNameValidation(t *testing.T) {
- eng := NewTestEngine(t)
- daemon := mkDaemonFromEngine(eng, t)
- defer nuke(daemon)
-
- for _, test := range []struct {
- Name string
- Valid bool
- }{
- {"abc-123_AAA.1", true},
- {"\000asdf", false},
- } {
- config, _, _, err := parseRun([]string{unitTestImageID, "echo test"})
- if err != nil {
- if !test.Valid {
- continue
- }
- t.Fatal(err)
- }
-
- containerId, _, err := daemon.ContainerCreate(test.Name, config, &runconfig.HostConfig{})
- if err != nil {
- if !test.Valid {
- continue
- }
- t.Fatal(err)
- }
-
- container, err := daemon.Get(containerId)
- if err != nil {
- t.Fatal(err)
- }
-
- if container.Name != "/"+test.Name {
- t.Fatalf("Expect /%s got %s", test.Name, container.Name)
- }
-
- if c, err := daemon.Get("/" + test.Name); err != nil {
- t.Fatalf("Couldn't retrieve test container as /%s", test.Name)
- } else if c.ID != container.ID {
- t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID)
- }
- }
-}
-
-func TestLinkChildContainer(t *testing.T) {
- eng := NewTestEngine(t)
- daemon := mkDaemonFromEngine(eng, t)
- defer nuke(daemon)
-
- config, _, _, err := parseRun([]string{unitTestImageID, "echo test"})
- if err != nil {
- t.Fatal(err)
- }
-
- container, err := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp"))
- if err != nil {
- t.Fatal(err)
- }
-
- webapp, err := daemon.GetByName("/webapp")
- if err != nil {
- t.Fatal(err)
- }
-
- if webapp.ID != container.ID {
- t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
- }
-
- config, _, _, err = parseRun([]string{GetTestImage(daemon).ID, "echo test"})
- if err != nil {
- t.Fatal(err)
- }
-
- childContainer, err := daemon.Get(createTestContainer(eng, config, t))
- if err != nil {
- t.Fatal(err)
- }
-
- if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil {
- t.Fatal(err)
- }
-
- // Get the child by it's new name
- db, err := daemon.GetByName("/webapp/db")
- if err != nil {
- t.Fatal(err)
- }
- if db.ID != childContainer.ID {
- t.Fatalf("Expect db id to match container id: %s != %s", db.ID, childContainer.ID)
- }
-}
-
-func TestGetAllChildren(t *testing.T) {
- eng := NewTestEngine(t)
- daemon := mkDaemonFromEngine(eng, t)
- defer nuke(daemon)
-
- config, _, _, err := parseRun([]string{unitTestImageID, "echo test"})
- if err != nil {
- t.Fatal(err)
- }
-
- container, err := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp"))
- if err != nil {
- t.Fatal(err)
- }
-
- webapp, err := daemon.GetByName("/webapp")
- if err != nil {
- t.Fatal(err)
- }
-
- if webapp.ID != container.ID {
- t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
- }
-
- config, _, _, err = parseRun([]string{unitTestImageID, "echo test"})
- if err != nil {
- t.Fatal(err)
- }
-
- childContainer, err := daemon.Get(createTestContainer(eng, config, t))
- if err != nil {
- t.Fatal(err)
- }
-
- if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil {
- t.Fatal(err)
- }
-
- children, err := daemon.Children("/webapp")
- if err != nil {
- t.Fatal(err)
- }
-
- if children == nil {
- t.Fatal("Children should not be nil")
- }
- if len(children) == 0 {
- t.Fatal("Children should not be empty")
- }
-
- for key, value := range children {
- if key != "/webapp/db" {
- t.Fatalf("Expected /webapp/db got %s", key)
- }
- if value.ID != childContainer.ID {
- t.Fatalf("Expected id %s got %s", childContainer.ID, value.ID)
- }
- }
-}
-
-func TestDestroyWithInitLayer(t *testing.T) {
- daemon := mkDaemon(t)
- defer nuke(daemon)
-
- container, _, err := daemon.Create(&runconfig.Config{
- Image: GetTestImage(daemon).ID,
- Cmd: runconfig.NewCommand("ls", "-al"),
- },
- &runconfig.HostConfig{},
- "")
-
- if err != nil {
- t.Fatal(err)
- }
- // Destroy
- if err := daemon.Rm(container); err != nil {
- t.Fatal(err)
- }
-
- // Make sure daemon.Exists() behaves correctly
- if daemon.Exists("test_destroy") {
- t.Fatalf("Exists() returned true")
- }
-
- // Make sure daemon.List() doesn't list the destroyed container
- if len(daemon.List()) != 0 {
- t.Fatalf("Expected 0 container, %v found", len(daemon.List()))
- }
-
- driver := daemon.Graph().Driver()
-
- // Make sure that the container does not exist in the driver
- if _, err := driver.Get(container.ID, ""); err == nil {
- t.Fatal("Container should not exist in the driver")
- }
-
- // Make sure that the init layer is removed from the driver
- if _, err := driver.Get(fmt.Sprintf("%s-init", container.ID), ""); err == nil {
- t.Fatal("Container's init layer should not exist in the driver")
- }
-}
diff --git a/integration/utils.go b/integration/utils.go
deleted file mode 100644
index 62e02e9bb1..0000000000
--- a/integration/utils.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package docker
-
-import (
- "bufio"
- "fmt"
- "io"
- "strings"
- "testing"
- "time"
-
- "github.com/docker/docker/daemon"
-)
-
-func closeWrap(args ...io.Closer) error {
- e := false
- ret := fmt.Errorf("Error closing elements")
- for _, c := range args {
- if err := c.Close(); err != nil {
- e = true
- ret = fmt.Errorf("%s\n%s", ret, err)
- }
- }
- if e {
- return ret
- }
- return nil
-}
-
-func waitContainerStart(t *testing.T, timeout time.Duration) *daemon.Container {
- var container *daemon.Container
-
- setTimeout(t, "Waiting for the container to be started timed out", timeout, func() {
- for {
- l := globalDaemon.List()
- if len(l) == 1 && l[0].IsRunning() {
- container = l[0]
- break
- }
- time.Sleep(10 * time.Millisecond)
- }
- })
-
- if container == nil {
- t.Fatal("An error occurred while waiting for the container to start")
- }
-
- return container
-}
-
-func setTimeout(t *testing.T, msg string, d time.Duration, f func()) {
- c := make(chan bool)
-
- // Make sure we are not too long
- go func() {
- time.Sleep(d)
- c <- true
- }()
- go func() {
- f()
- c <- false
- }()
- if <-c && msg != "" {
- t.Fatal(msg)
- }
-}
-
-func expectPipe(expected string, r io.Reader) error {
- o, err := bufio.NewReader(r).ReadString('\n')
- if err != nil {
- return err
- }
- if strings.Trim(o, " \r\n") != expected {
- return fmt.Errorf("Unexpected output. Expected [%s], received [%s]", expected, o)
- }
- return nil
-}
-
-func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error {
- for i := 0; i < count; i++ {
- if _, err := w.Write([]byte(input)); err != nil {
- return err
- }
- if err := expectPipe(output, r); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/integration/utils_test.go b/integration/utils_test.go
deleted file mode 100644
index 9479d4296c..0000000000
--- a/integration/utils_test.go
+++ /dev/null
@@ -1,348 +0,0 @@
-package docker
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "os"
- "path"
- "path/filepath"
- "strings"
- "testing"
- "time"
-
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/daemon"
- "github.com/docker/docker/daemon/networkdriver/bridge"
- "github.com/docker/docker/engine"
- "github.com/docker/docker/graph"
- flag "github.com/docker/docker/pkg/mflag"
- "github.com/docker/docker/registry"
- "github.com/docker/docker/runconfig"
- "github.com/docker/docker/utils"
-)
-
-type Fataler interface {
- Fatal(...interface{})
-}
-
-// This file contains utility functions for docker's unit test suite.
-// It has to be named XXX_test.go, apparently, in other to access private functions
-// from other XXX_test.go functions.
-
-// Create a temporary daemon suitable for unit testing.
-// Call t.Fatal() at the first error.
-func mkDaemon(f Fataler) *daemon.Daemon {
- eng := newTestEngine(f, false, "")
- return mkDaemonFromEngine(eng, f)
-}
-
-func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f Fataler, name string) (shortId string) {
- containerId, _, err := getDaemon(eng).ContainerCreate(name, config, &runconfig.HostConfig{})
- if err != nil {
- f.Fatal(err)
- }
- return containerId
-}
-
-func createTestContainer(eng *engine.Engine, config *runconfig.Config, f Fataler) (shortId string) {
- return createNamedTestContainer(eng, config, f, "")
-}
-
-func startContainer(eng *engine.Engine, id string, t Fataler) {
- if err := getDaemon(eng).ContainerStart(id, &runconfig.HostConfig{}); err != nil {
- t.Fatal(err)
- }
-}
-
-func containerRun(eng *engine.Engine, id string, t Fataler) {
- startContainer(eng, id, t)
- containerWait(eng, id, t)
-}
-
-func containerFileExists(eng *engine.Engine, id, dir string, t Fataler) bool {
- c := getContainer(eng, id, t)
- if err := c.Mount(); err != nil {
- t.Fatal(err)
- }
- defer c.Unmount()
- if _, err := os.Stat(path.Join(c.RootfsPath(), dir)); err != nil {
- if os.IsNotExist(err) {
- return false
- }
- t.Fatal(err)
- }
- return true
-}
-
-func containerAttach(eng *engine.Engine, id string, t Fataler) (io.WriteCloser, io.ReadCloser) {
- c := getContainer(eng, id, t)
- i := c.StdinPipe()
- o := c.StdoutPipe()
- return i, o
-}
-
-func containerWait(eng *engine.Engine, id string, t Fataler) int {
- ex, _ := getContainer(eng, id, t).WaitStop(-1 * time.Second)
- return ex
-}
-
-func containerWaitTimeout(eng *engine.Engine, id string, t Fataler) error {
- _, err := getContainer(eng, id, t).WaitStop(500 * time.Millisecond)
- return err
-}
-
-func containerKill(eng *engine.Engine, id string, t Fataler) {
- if err := getDaemon(eng).ContainerKill(id, 0); err != nil {
- t.Fatal(err)
- }
-}
-
-func containerRunning(eng *engine.Engine, id string, t Fataler) bool {
- return getContainer(eng, id, t).IsRunning()
-}
-
-func containerAssertExists(eng *engine.Engine, id string, t Fataler) {
- getContainer(eng, id, t)
-}
-
-func containerAssertNotExists(eng *engine.Engine, id string, t Fataler) {
- daemon := mkDaemonFromEngine(eng, t)
- if c, _ := daemon.Get(id); c != nil {
- t.Fatal(fmt.Errorf("Container %s should not exist", id))
- }
-}
-
-// assertHttpNotError expect the given response to not have an error.
-// Otherwise the it causes the test to fail.
-func assertHttpNotError(r *httptest.ResponseRecorder, t Fataler) {
- // Non-error http status are [200, 400)
- if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest {
- t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code))
- }
-}
-
-// assertHttpError expect the given response to have an error.
-// Otherwise the it causes the test to fail.
-func assertHttpError(r *httptest.ResponseRecorder, t Fataler) {
- // Non-error http status are [200, 400)
- if !(r.Code < http.StatusOK || r.Code >= http.StatusBadRequest) {
- t.Fatal(fmt.Errorf("Unexpected http success code: %v", r.Code))
- }
-}
-
-func getContainer(eng *engine.Engine, id string, t Fataler) *daemon.Container {
- daemon := mkDaemonFromEngine(eng, t)
- c, err := daemon.Get(id)
- if err != nil {
- t.Fatal(err)
- }
- return c
-}
-
-func mkDaemonFromEngine(eng *engine.Engine, t Fataler) *daemon.Daemon {
- iDaemon := eng.HackGetGlobalVar("httpapi.daemon")
- if iDaemon == nil {
- panic("Legacy daemon field not set in engine")
- }
- daemon, ok := iDaemon.(*daemon.Daemon)
- if !ok {
- panic("Legacy daemon field in engine does not cast to *daemon.Daemon")
- }
- return daemon
-}
-
-func newTestEngine(t Fataler, autorestart bool, root string) *engine.Engine {
- if root == "" {
- if dir, err := newTestDirectory(unitTestStoreBase); err != nil {
- t.Fatal(err)
- } else {
- root = dir
- }
- }
- os.MkdirAll(root, 0700)
-
- eng := engine.New()
- eng.Logging = false
-
- // (This is manually copied and modified from main() until we have a more generic plugin system)
- cfg := &daemon.Config{
- Root: root,
- AutoRestart: autorestart,
- ExecDriver: "native",
- // Either InterContainerCommunication or EnableIptables must be set,
- // otherwise NewDaemon will fail because of conflicting settings.
- Bridge: bridge.Config{
- InterContainerCommunication: true,
- },
- TrustKeyPath: filepath.Join(root, "key.json"),
- LogConfig: runconfig.LogConfig{Type: "json-file"},
- }
- d, err := daemon.NewDaemon(cfg, eng, registry.NewService(nil))
- if err != nil {
- t.Fatal(err)
- }
- if err := d.Install(eng); err != nil {
- t.Fatal(err)
- }
- return eng
-}
-
-func NewTestEngine(t Fataler) *engine.Engine {
- return newTestEngine(t, false, "")
-}
-
-func newTestDirectory(templateDir string) (dir string, err error) {
- return utils.TestDirectory(templateDir)
-}
-
-func getCallerName(depth int) string {
- return utils.GetCallerName(depth)
-}
-
-// Write `content` to the file at path `dst`, creating it if necessary,
-// as well as any missing directories.
-// The file is truncated if it already exists.
-// Call t.Fatal() at the first error.
-func writeFile(dst, content string, t *testing.T) {
- // Create subdirectories if necessary
- if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) {
- t.Fatal(err)
- }
- f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700)
- if err != nil {
- t.Fatal(err)
- }
- // Write content (truncate if it exists)
- if _, err := io.Copy(f, strings.NewReader(content)); err != nil {
- t.Fatal(err)
- }
-}
-
-// Return the contents of file at path `src`.
-// Call t.Fatal() at the first error (including if the file doesn't exist)
-func readFile(src string, t *testing.T) (content string) {
- f, err := os.Open(src)
- if err != nil {
- t.Fatal(err)
- }
- data, err := ioutil.ReadAll(f)
- if err != nil {
- t.Fatal(err)
- }
- return string(data)
-}
-
-// Create a test container from the given daemon `r` and run arguments `args`.
-// If the image name is "_", (eg. []string{"-i", "-t", "_", "bash"}, it is
-// dynamically replaced by the current test image.
-// The caller is responsible for destroying the container.
-// Call t.Fatal() at the first error.
-func mkContainer(r *daemon.Daemon, args []string, t *testing.T) (*daemon.Container, *runconfig.HostConfig, error) {
- config, hc, _, err := parseRun(args)
- defer func() {
- if err != nil && t != nil {
- t.Fatal(err)
- }
- }()
- if err != nil {
- return nil, nil, err
- }
- if config.Image == "_" {
- config.Image = GetTestImage(r).ID
- }
- c, _, err := r.Create(config, nil, "")
- if err != nil {
- return nil, nil, err
- }
- // NOTE: hostConfig is ignored.
- // If `args` specify privileged mode, custom lxc conf, external mount binds,
- // port redirects etc. they will be ignored.
- // This is because the correct way to set these things is to pass environment
- // to the `start` job.
- // FIXME: this helper function should be deprecated in favor of calling
- // `create` and `start` jobs directly.
- return c, hc, nil
-}
-
-// Create a test container, start it, wait for it to complete, destroy it,
-// and return its standard output as a string.
-// The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image.
-// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally.
-func runContainer(eng *engine.Engine, r *daemon.Daemon, args []string, t *testing.T) (output string, err error) {
- defer func() {
- if err != nil && t != nil {
- t.Fatal(err)
- }
- }()
- container, hc, err := mkContainer(r, args, t)
- if err != nil {
- return "", err
- }
- defer r.Rm(container)
- stdout := container.StdoutPipe()
- defer stdout.Close()
-
- job := eng.Job("start", container.ID)
- if err := job.ImportEnv(hc); err != nil {
- return "", err
- }
- if err := job.Run(); err != nil {
- return "", err
- }
-
- container.WaitStop(-1 * time.Second)
- data, err := ioutil.ReadAll(stdout)
- if err != nil {
- return "", err
- }
- output = string(data)
- return
-}
-
-// FIXME: this is duplicated from graph_test.go in the docker package.
-func fakeTar() (io.ReadCloser, error) {
- content := []byte("Hello world!\n")
- buf := new(bytes.Buffer)
- tw := tar.NewWriter(buf)
- for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
- hdr := new(tar.Header)
- hdr.Size = int64(len(content))
- hdr.Name = name
- if err := tw.WriteHeader(hdr); err != nil {
- return nil, err
- }
- tw.Write([]byte(content))
- }
- tw.Close()
- return ioutil.NopCloser(buf), nil
-}
-
-func getImages(eng *engine.Engine, t *testing.T, all bool, filter string) []*types.Image {
- config := graph.ImagesConfig{
- Filter: filter,
- All: all,
- }
- images, err := getDaemon(eng).Repositories().Images(&config)
- if err != nil {
- t.Fatal(err)
- }
-
- return images
-}
-
-func parseRun(args []string) (*runconfig.Config, *runconfig.HostConfig, *flag.FlagSet, error) {
- cmd := flag.NewFlagSet("run", flag.ContinueOnError)
- cmd.SetOutput(ioutil.Discard)
- cmd.Usage = nil
- return runconfig.Parse(cmd, args)
-}
-
-func getDaemon(eng *engine.Engine) *daemon.Daemon {
- return eng.HackGetGlobalVar("httpapi.daemon").(*daemon.Daemon)
-}
diff --git a/integration/z_final_test.go b/integration/z_final_test.go
deleted file mode 100644
index d6ef2884f2..0000000000
--- a/integration/z_final_test.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package docker
-
-import (
- "runtime"
- "testing"
-
- "github.com/docker/docker/pkg/fileutils"
-)
-
-func displayFdGoroutines(t *testing.T) {
- t.Logf("File Descriptors: %d, Goroutines: %d", fileutils.GetTotalUsedFds(), runtime.NumGoroutine())
-}
-
-func TestFinal(t *testing.T) {
- nuke(globalDaemon)
- t.Logf("Start File Descriptors: %d, Start Goroutines: %d", startFds, startGoroutines)
- displayFdGoroutines(t)
-}
diff --git a/links/links.go b/links/links.go
index 935bff4ae3..a756c8b0e5 100644
--- a/links/links.go
+++ b/links/links.go
@@ -5,9 +5,7 @@ import (
"path"
"strings"
- "github.com/docker/docker/daemon/networkdriver/bridge"
"github.com/docker/docker/nat"
- "github.com/docker/docker/pkg/iptables"
)
type Link struct {
@@ -140,26 +138,10 @@ func (l *Link) getDefaultPort() *nat.Port {
}
func (l *Link) Enable() error {
- // -A == iptables append flag
- if err := l.toggle("-A", false); err != nil {
- return err
- }
- // call this on Firewalld reload
- iptables.OnReloaded(func() { l.toggle("-I", false) })
l.IsEnabled = true
return nil
}
func (l *Link) Disable() {
- // We do not care about errors here because the link may not
- // exist in iptables
- // -D == iptables delete flag
- l.toggle("-D", true)
- // call this on Firewalld reload
- iptables.OnReloaded(func() { l.toggle("-D", true) })
l.IsEnabled = false
}
-
-func (l *Link) toggle(action string, ignoreErrors bool) error {
- return bridge.LinkContainers(action, l.ParentIP, l.ChildIP, l.Ports, ignoreErrors)
-}
diff --git a/nat/sort.go b/nat/sort.go
index f36c12f7bb..6441936ff9 100644
--- a/nat/sort.go
+++ b/nat/sort.go
@@ -1,6 +1,10 @@
package nat
-import "sort"
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
type portSorter struct {
ports []Port
@@ -26,3 +30,63 @@ func Sort(ports []Port, predicate func(i, j Port) bool) {
s := &portSorter{ports, predicate}
sort.Sort(s)
}
+
+type portMapEntry struct {
+ port Port
+ binding PortBinding
+}
+
+type portMapSorter []portMapEntry
+
+func (s portMapSorter) Len() int { return len(s) }
+func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// sort the port so that the order is:
+// 1. port with larger specified bindings
+// 2. larger port
+// 3. port with tcp protocol
+func (s portMapSorter) Less(i, j int) bool {
+ pi, pj := s[i].port, s[j].port
+ hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
+ return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
+}
+
+// SortPortMap sorts the list of ports and their respected mapping. The ports
+// will explicit HostPort will be placed first.
+func SortPortMap(ports []Port, bindings PortMap) {
+ s := portMapSorter{}
+ for _, p := range ports {
+ if binding, ok := bindings[p]; ok {
+ for _, b := range binding {
+ s = append(s, portMapEntry{port: p, binding: b})
+ }
+ } else {
+ s = append(s, portMapEntry{port: p})
+ }
+ bindings[p] = []PortBinding{}
+ }
+
+ sort.Sort(s)
+ var (
+ i int
+ pm = make(map[Port]struct{})
+ )
+ // reorder ports
+ for _, entry := range s {
+ if _, ok := pm[entry.port]; !ok {
+ ports[i] = entry.port
+ pm[entry.port] = struct{}{}
+ i++
+ }
+ // reorder bindings for this port
+ bindings[entry.port] = append(bindings[entry.port], entry.binding)
+ }
+}
+
+func toInt(s string) int64 {
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
diff --git a/nat/sort_test.go b/nat/sort_test.go
index 5d490e321b..ba24cdbcb9 100644
--- a/nat/sort_test.go
+++ b/nat/sort_test.go
@@ -2,6 +2,7 @@ package nat
import (
"fmt"
+ "reflect"
"testing"
)
@@ -39,3 +40,46 @@ func TestSortSamePortWithDifferentProto(t *testing.T) {
t.Fail()
}
}
+
+func TestSortPortMap(t *testing.T) {
+ ports := []Port{
+ Port("22/tcp"),
+ Port("22/udp"),
+ Port("8000/tcp"),
+ Port("6379/tcp"),
+ Port("9999/tcp"),
+ }
+
+ portMap := PortMap{
+ Port("22/tcp"): []PortBinding{
+ {},
+ },
+ Port("8000/tcp"): []PortBinding{
+ {},
+ },
+ Port("6379/tcp"): []PortBinding{
+ {},
+ {HostIp: "0.0.0.0", HostPort: "32749"},
+ },
+ Port("9999/tcp"): []PortBinding{
+ {HostIp: "0.0.0.0", HostPort: "40000"},
+ },
+ }
+
+ SortPortMap(ports, portMap)
+ if !reflect.DeepEqual(ports, []Port{
+ Port("9999/tcp"),
+ Port("6379/tcp"),
+ Port("8000/tcp"),
+ Port("22/tcp"),
+ Port("22/udp"),
+ }) {
+ t.Errorf("failed to prioritize port with explicit mappings, got %v", ports)
+ }
+ if pm := portMap[Port("6379/tcp")]; !reflect.DeepEqual(pm, []PortBinding{
+ {HostIp: "0.0.0.0", HostPort: "32749"},
+ {},
+ }) {
+ t.Errorf("failed to prioritize bindings with explicit mappings, got %v", pm)
+ }
+}
diff --git a/opts/opts.go b/opts/opts.go
index d2c32f13c7..c330c27a5d 100644
--- a/opts/opts.go
+++ b/opts/opts.go
@@ -14,9 +14,13 @@ import (
)
var (
- alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
- domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
- DefaultHTTPHost = "127.0.0.1" // Default HTTP Host used if only port is provided to -H flag e.g. docker -d -H tcp://:8080
+ alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
+ domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+ DefaultHTTPHost = "127.0.0.1" // Default HTTP Host used if only port is provided to -H flag e.g. docker -d -H tcp://:8080
+ // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
+ // is not supplied. A better longer term solution would be to use a named
+ // pipe as the default on the Windows daemon.
+ DefaultHTTPPort = 2375 // Default HTTP Port
DefaultUnixSocket = "/var/run/docker.sock" // Docker daemon by default always listens on the default unix socket
)
@@ -24,6 +28,14 @@ func ListVar(values *[]string, names []string, usage string) {
flag.Var(newListOptsRef(values, nil), names, usage)
}
+func MapVar(values map[string]string, names []string, usage string) {
+ flag.Var(newMapOpt(values, nil), names, usage)
+}
+
+func LogOptsVar(values map[string]string, names []string, usage string) {
+ flag.Var(newMapOpt(values, ValidateLogOpts), names, usage)
+}
+
func HostListVar(values *[]string, names []string, usage string) {
flag.Var(newListOptsRef(values, ValidateHost), names, usage)
}
@@ -126,10 +138,53 @@ func (opts *ListOpts) Len() int {
return len((*opts.values))
}
+//MapOpts type
+type MapOpts struct {
+ values map[string]string
+ validator ValidatorFctType
+}
+
+func (opts *MapOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ vals := strings.SplitN(value, "=", 2)
+ if len(vals) == 1 {
+ (opts.values)[vals[0]] = ""
+ } else {
+ (opts.values)[vals[0]] = vals[1]
+ }
+ return nil
+}
+
+func (opts *MapOpts) String() string {
+ return fmt.Sprintf("%v", map[string]string((opts.values)))
+}
+
+func newMapOpt(values map[string]string, validator ValidatorFctType) *MapOpts {
+ return &MapOpts{
+ values: values,
+ validator: validator,
+ }
+}
+
// Validators
type ValidatorFctType func(val string) (string, error)
type ValidatorFctListType func(val string) ([]string, error)
+func ValidateLogOpts(val string) (string, error) {
+ allowedKeys := map[string]string{}
+ vals := strings.Split(val, "=")
+ if allowedKeys[vals[0]] != "" {
+ return val, nil
+ }
+ return "", fmt.Errorf("%s is not a valid log opt", vals[0])
+}
+
func ValidateAttach(val string) (string, error) {
s := strings.ToLower(val)
for _, str := range []string{"stdin", "stdout", "stderr"} {
@@ -141,7 +196,7 @@ func ValidateAttach(val string) (string, error) {
}
func ValidateLink(val string) (string, error) {
- if _, err := parsers.PartParser("name:alias", val); err != nil {
+ if _, _, err := parsers.ParseLink(val); err != nil {
return val, err
}
return val, nil
diff --git a/opts/opts_test.go b/opts/opts_test.go
index 8370926da5..dfad430ac4 100644
--- a/opts/opts_test.go
+++ b/opts/opts_test.go
@@ -1,6 +1,7 @@
package opts
import (
+ "fmt"
"strings"
"testing"
)
@@ -28,6 +29,31 @@ func TestValidateIPAddress(t *testing.T) {
}
+func TestMapOpts(t *testing.T) {
+ tmpMap := make(map[string]string)
+ o := newMapOpt(tmpMap, logOptsValidator)
+ o.Set("max-size=1")
+ if o.String() != "map[max-size:1]" {
+ t.Errorf("%s != [map[max-size:1]", o.String())
+ }
+
+ o.Set("max-file=2")
+ if len(tmpMap) != 2 {
+ t.Errorf("map length %d != 2", len(tmpMap))
+ }
+
+ if tmpMap["max-file"] != "2" {
+ t.Errorf("max-file = %s != 2", tmpMap["max-file"])
+ }
+
+ if tmpMap["max-size"] != "1" {
+ t.Errorf("max-size = %s != 1", tmpMap["max-size"])
+ }
+ if o.Set("dummy-val=3") == nil {
+ t.Errorf("validator is not being called")
+ }
+}
+
func TestValidateMACAddress(t *testing.T) {
if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil {
t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err)
@@ -152,3 +178,12 @@ func TestValidateExtraHosts(t *testing.T) {
}
}
}
+
+func logOptsValidator(val string) (string, error) {
+ allowedKeys := map[string]string{"max-size": "1", "max-file": "2"}
+ vals := strings.Split(val, "=")
+ if allowedKeys[vals[0]] != "" {
+ return val, nil
+ }
+ return "", fmt.Errorf("invalid key %s", vals[0])
+}
diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go
index 4d8d260087..cde4de55a0 100644
--- a/pkg/archive/archive.go
+++ b/pkg/archive/archive.go
@@ -1,6 +1,7 @@
package archive
import (
+ "archive/tar"
"bufio"
"bytes"
"compress/bzip2"
@@ -11,13 +12,11 @@ import (
"io/ioutil"
"os"
"os/exec"
- "path"
"path/filepath"
+ "runtime"
"strings"
"syscall"
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
-
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/pools"
@@ -292,17 +291,8 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
file.Close()
case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
- mode := uint32(hdr.Mode & 07777)
- switch hdr.Typeflag {
- case tar.TypeBlock:
- mode |= syscall.S_IFBLK
- case tar.TypeChar:
- mode |= syscall.S_IFCHR
- case tar.TypeFifo:
- mode |= syscall.S_IFIFO
- }
-
- if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
+ // Handle this is an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
return err
}
@@ -338,8 +328,11 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
}
- if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown {
- return err
+ // Lchown is not supported on Windows
+ if runtime.GOOS != "windows" {
+ if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown {
+ return err
+ }
}
for key, value := range hdr.Xattrs {
@@ -350,20 +343,12 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
// There is no LChmod, so ignore mode for symlink. Also, this
// must happen after chown, as that can modify the file mode
- if hdr.Typeflag == tar.TypeLink {
- if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
- if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
- return err
- }
- }
- } else if hdr.Typeflag != tar.TypeSymlink {
- if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
- return err
- }
+ if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+ return err
}
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
- // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and
+ // syscall.UtimesNano doesn't support a NOFOLLOW flag atm
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
@@ -532,7 +517,7 @@ loop:
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
- err = os.MkdirAll(parentPath, 0777)
+ err = system.MkdirAll(parentPath, 0777)
if err != nil {
return err
}
@@ -652,7 +637,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
}
// Create dst, copy src's content into it
logrus.Debugf("Creating dest directory: %s", dst)
- if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
+ if err := system.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
return err
}
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
@@ -676,12 +661,12 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
if srcSt.IsDir() {
return fmt.Errorf("Can't copy a directory")
}
- // Clean up the trailing /
- if dst[len(dst)-1] == '/' {
- dst = path.Join(dst, filepath.Base(src))
+ // Clean up the trailing slash
+ if dst[len(dst)-1] == os.PathSeparator {
+ dst = filepath.Join(dst, filepath.Base(src))
}
// Create the holding directory if necessary
- if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
+ if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
return err
}
@@ -794,9 +779,6 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
if _, err := io.Copy(f, src); err != nil {
return nil, err
}
- if err = f.Sync(); err != nil {
- return nil, err
- }
if _, err := f.Seek(0, 0); err != nil {
return nil, err
}
diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go
index ae9b5a8cd2..f24f628c2f 100644
--- a/pkg/archive/archive_test.go
+++ b/pkg/archive/archive_test.go
@@ -1,6 +1,7 @@
package archive
import (
+ "archive/tar"
"bytes"
"fmt"
"io"
@@ -15,7 +16,6 @@ import (
"time"
"github.com/docker/docker/pkg/system"
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
func TestIsArchiveNilHeader(t *testing.T) {
diff --git a/pkg/archive/archive_unix.go b/pkg/archive/archive_unix.go
index 82c9a82c1a..8a15cfebe5 100644
--- a/pkg/archive/archive_unix.go
+++ b/pkg/archive/archive_unix.go
@@ -3,11 +3,12 @@
package archive
import (
+ "archive/tar"
"errors"
"os"
"syscall"
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+ "github.com/docker/docker/pkg/system"
)
// canonicalTarNameForPath returns platform-specific filepath
@@ -52,3 +53,37 @@ func major(device uint64) uint64 {
func minor(device uint64) uint64 {
return (device & 0xff) | ((device >> 12) & 0xfff00)
}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ mode := uint32(hdr.Mode & 07777)
+ switch hdr.Typeflag {
+ case tar.TypeBlock:
+ mode |= syscall.S_IFBLK
+ case tar.TypeChar:
+ mode |= syscall.S_IFCHR
+ case tar.TypeFifo:
+ mode |= syscall.S_IFIFO
+ }
+
+ if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
+ return err
+ }
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/pkg/archive/archive_windows.go b/pkg/archive/archive_windows.go
index 6caef3b735..10db4bd00e 100644
--- a/pkg/archive/archive_windows.go
+++ b/pkg/archive/archive_windows.go
@@ -3,11 +3,10 @@
package archive
import (
+ "archive/tar"
"fmt"
"os"
"strings"
-
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
// canonicalTarNameForPath returns platform-specific filepath
@@ -15,11 +14,11 @@ import (
// path.
func CanonicalTarNameForPath(p string) (string, error) {
// windows: convert windows style relative path with backslashes
- // into forward slashes. since windows does not allow '/' or '\'
+ // into forward slashes. Since windows does not allow '/' or '\'
// in file names, it is mostly safe to replace however we must
// check just in case
if strings.Contains(p, "/") {
- return "", fmt.Errorf("windows path contains forward slash: %s", p)
+ return "", fmt.Errorf("Windows path contains forward slash: %s", p)
}
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
@@ -39,3 +38,13 @@ func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, st
// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
return
}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ return nil
+}
diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go
index 06fad8eb4f..affafad39e 100644
--- a/pkg/archive/changes.go
+++ b/pkg/archive/changes.go
@@ -1,6 +1,7 @@
package archive
import (
+ "archive/tar"
"bytes"
"fmt"
"io"
@@ -11,8 +12,6 @@ import (
"syscall"
"time"
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
-
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
@@ -175,10 +174,6 @@ func (info *FileInfo) path() string {
return filepath.Join(info.parent.path(), info.name)
}
-func (info *FileInfo) isDir() bool {
- return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
-}
-
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
sizeAtEntry := len(*changes)
@@ -215,13 +210,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
// be visible when actually comparing the stat fields. The only time this
// breaks down is if some code intentionally hides a change by setting
// back mtime
- if oldStat.Mode() != newStat.Mode() ||
- oldStat.Uid() != newStat.Uid() ||
- oldStat.Gid() != newStat.Gid() ||
- oldStat.Rdev() != newStat.Rdev() ||
- // Don't look at size for dirs, its not a good measure of change
- (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
- (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) ||
+ if statDifferent(oldStat, newStat) ||
bytes.Compare(oldChild.capability, newChild.capability) != 0 {
change := Change{
Path: newChild.path(),
diff --git a/pkg/archive/changes_unix.go b/pkg/archive/changes_unix.go
new file mode 100644
index 0000000000..d780f16398
--- /dev/null
+++ b/pkg/archive/changes_unix.go
@@ -0,0 +1,27 @@
+// +build !windows
+
+package archive
+
+import (
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
+ // Don't look at size for dirs, its not a good measure of change
+ if oldStat.Mode() != newStat.Mode() ||
+ oldStat.Uid() != newStat.Uid() ||
+ oldStat.Gid() != newStat.Gid() ||
+ oldStat.Rdev() != newStat.Rdev() ||
+ // Don't look at size for dirs, its not a good measure of change
+ (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
+ (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
+}
diff --git a/pkg/archive/changes_windows.go b/pkg/archive/changes_windows.go
new file mode 100644
index 0000000000..4809b7a59c
--- /dev/null
+++ b/pkg/archive/changes_windows.go
@@ -0,0 +1,20 @@
+package archive
+
+import (
+ "github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
+
+ // Don't look at size for dirs, its not a good measure of change
+ if oldStat.ModTime() != newStat.ModTime() ||
+ oldStat.Mode() != newStat.Mode() ||
+ oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.IsDir()
+}
diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go
index b5eb63fd44..fd4946078b 100644
--- a/pkg/archive/diff.go
+++ b/pkg/archive/diff.go
@@ -1,6 +1,7 @@
package archive
import (
+ "archive/tar"
"fmt"
"io"
"io/ioutil"
@@ -9,8 +10,6 @@ import (
"strings"
"syscall"
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
-
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
)
@@ -48,7 +47,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
- err = os.MkdirAll(parentPath, 0600)
+ err = system.MkdirAll(parentPath, 0600)
if err != nil {
return 0, err
}
diff --git a/pkg/archive/diff_test.go b/pkg/archive/diff_test.go
index 758c4115d5..01ed437280 100644
--- a/pkg/archive/diff_test.go
+++ b/pkg/archive/diff_test.go
@@ -1,9 +1,8 @@
package archive
import (
+ "archive/tar"
"testing"
-
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
func TestApplyLayerInvalidFilenames(t *testing.T) {
diff --git a/pkg/archive/utils_test.go b/pkg/archive/utils_test.go
index 9048027203..2a266c2fdf 100644
--- a/pkg/archive/utils_test.go
+++ b/pkg/archive/utils_test.go
@@ -1,6 +1,7 @@
package archive
import (
+ "archive/tar"
"bytes"
"fmt"
"io"
@@ -8,8 +9,6 @@ import (
"os"
"path/filepath"
"time"
-
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
var testUntarFns = map[string]func(string, io.Reader) error{
diff --git a/pkg/archive/wrap.go b/pkg/archive/wrap.go
index b8b60197a3..dfb335c0b6 100644
--- a/pkg/archive/wrap.go
+++ b/pkg/archive/wrap.go
@@ -1,8 +1,8 @@
package archive
import (
+ "archive/tar"
"bytes"
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
"io/ioutil"
)
diff --git a/pkg/devicemapper/devmapper.go b/pkg/devicemapper/devmapper.go
index bb89f7fac2..e7f17b88c4 100644
--- a/pkg/devicemapper/devmapper.go
+++ b/pkg/devicemapper/devmapper.go
@@ -55,6 +55,7 @@ var (
ErrTaskGetDeps = errors.New("dm_task_get_deps failed")
ErrTaskGetInfo = errors.New("dm_task_get_info failed")
ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed")
+ ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed")
ErrTaskSetCookie = errors.New("dm_task_set_cookie failed")
ErrNilCookie = errors.New("cookie ptr can't be nil")
ErrAttachLoopbackDevice = errors.New("loopback mounting failed")
@@ -69,9 +70,11 @@ var (
ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity")
ErrBusy = errors.New("Device is Busy")
ErrDeviceIdExists = errors.New("Device Id Exists")
+ ErrEnxio = errors.New("No such device or address")
dmSawBusy bool
dmSawExist bool
+ dmSawEnxio bool // No Such Device or Address
)
type (
@@ -84,16 +87,17 @@ type (
Device []uint64
}
Info struct {
- Exists int
- Suspended int
- LiveTable int
- InactiveTable int
- OpenCount int32
- EventNr uint32
- Major uint32
- Minor uint32
- ReadOnly int
- TargetCount int32
+ Exists int
+ Suspended int
+ LiveTable int
+ InactiveTable int
+ OpenCount int32
+ EventNr uint32
+ Major uint32
+ Minor uint32
+ ReadOnly int
+ TargetCount int32
+ DeferredRemove int
}
TaskType int
AddNodeType int
@@ -219,6 +223,14 @@ func (t *Task) GetInfo() (*Info, error) {
return info, nil
}
+func (t *Task) GetInfoWithDeferred() (*Info, error) {
+ info := &Info{}
+ if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 {
+ return nil, ErrTaskGetInfo
+ }
+ return info, nil
+}
+
func (t *Task) GetDriverVersion() (string, error) {
res := DmTaskGetDriverVersion(t.unmanaged)
if res == "" {
@@ -371,6 +383,55 @@ func RemoveDevice(name string) error {
return nil
}
+func RemoveDeviceDeferred(name string) error {
+ logrus.Debugf("[devmapper] RemoveDeviceDeferred START(%s)", name)
+ defer logrus.Debugf("[devmapper] RemoveDeviceDeferred END(%s)", name)
+ task, err := TaskCreateNamed(DeviceRemove, name)
+ if task == nil {
+ return err
+ }
+
+ if err := DmTaskDeferredRemove(task.unmanaged); err != 1 {
+ return ErrTaskDeferredRemove
+ }
+
+ if err = task.Run(); err != nil {
+ return fmt.Errorf("Error running RemoveDeviceDeferred %s", err)
+ }
+
+ return nil
+}
+
+// Useful helper for cleanup
+func CancelDeferredRemove(deviceName string) error {
+ task, err := TaskCreateNamed(DeviceTargetMsg, deviceName)
+ if task == nil {
+ return err
+ }
+
+ if err := task.SetSector(0); err != nil {
+ return fmt.Errorf("Can't set sector %s", err)
+ }
+
+ if err := task.SetMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil {
+ return fmt.Errorf("Can't set message %s", err)
+ }
+
+ dmSawBusy = false
+ dmSawEnxio = false
+ if err := task.Run(); err != nil {
+ // A device might be being deleted already
+ if dmSawBusy {
+ return ErrBusy
+ } else if dmSawEnxio {
+ return ErrEnxio
+ }
+ return fmt.Errorf("Error running CancelDeferredRemove %s", err)
+
+ }
+ return nil
+}
+
func GetBlockDeviceSize(file *os.File) (uint64, error) {
size, err := ioctlBlkGetSize64(file.Fd())
if err != nil {
@@ -479,6 +540,17 @@ func GetInfo(name string) (*Info, error) {
return task.GetInfo()
}
+func GetInfoWithDeferred(name string) (*Info, error) {
+ task, err := TaskCreateNamed(DeviceInfo, name)
+ if task == nil {
+ return nil, err
+ }
+ if err := task.Run(); err != nil {
+ return nil, err
+ }
+ return task.GetInfoWithDeferred()
+}
+
func GetDriverVersion() (string, error) {
task := TaskCreate(DeviceVersion)
if task == nil {
diff --git a/pkg/devicemapper/devmapper_log.go b/pkg/devicemapper/devmapper_log.go
index d6550bd626..f66a20884b 100644
--- a/pkg/devicemapper/devmapper_log.go
+++ b/pkg/devicemapper/devmapper_log.go
@@ -22,6 +22,10 @@ func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_cla
if strings.Contains(msg, "File exists") {
dmSawExist = true
}
+
+ if strings.Contains(msg, "No such device or address") {
+ dmSawEnxio = true
+ }
}
if dmLogger != nil {
diff --git a/pkg/devicemapper/devmapper_wrapper.go b/pkg/devicemapper/devmapper_wrapper.go
index e436cca32d..87c200376f 100644
--- a/pkg/devicemapper/devmapper_wrapper.go
+++ b/pkg/devicemapper/devmapper_wrapper.go
@@ -90,28 +90,30 @@ const (
)
var (
- DmGetLibraryVersion = dmGetLibraryVersionFct
- DmGetNextTarget = dmGetNextTargetFct
- DmLogInitVerbose = dmLogInitVerboseFct
- DmSetDevDir = dmSetDevDirFct
- DmTaskAddTarget = dmTaskAddTargetFct
- DmTaskCreate = dmTaskCreateFct
- DmTaskDestroy = dmTaskDestroyFct
- DmTaskGetDeps = dmTaskGetDepsFct
- DmTaskGetInfo = dmTaskGetInfoFct
- DmTaskGetDriverVersion = dmTaskGetDriverVersionFct
- DmTaskRun = dmTaskRunFct
- DmTaskSetAddNode = dmTaskSetAddNodeFct
- DmTaskSetCookie = dmTaskSetCookieFct
- DmTaskSetMessage = dmTaskSetMessageFct
- DmTaskSetName = dmTaskSetNameFct
- DmTaskSetRo = dmTaskSetRoFct
- DmTaskSetSector = dmTaskSetSectorFct
- DmUdevWait = dmUdevWaitFct
- DmUdevSetSyncSupport = dmUdevSetSyncSupportFct
- DmUdevGetSyncSupport = dmUdevGetSyncSupportFct
- DmCookieSupported = dmCookieSupportedFct
- LogWithErrnoInit = logWithErrnoInitFct
+ DmGetLibraryVersion = dmGetLibraryVersionFct
+ DmGetNextTarget = dmGetNextTargetFct
+ DmLogInitVerbose = dmLogInitVerboseFct
+ DmSetDevDir = dmSetDevDirFct
+ DmTaskAddTarget = dmTaskAddTargetFct
+ DmTaskCreate = dmTaskCreateFct
+ DmTaskDestroy = dmTaskDestroyFct
+ DmTaskGetDeps = dmTaskGetDepsFct
+ DmTaskGetInfo = dmTaskGetInfoFct
+ DmTaskGetDriverVersion = dmTaskGetDriverVersionFct
+ DmTaskRun = dmTaskRunFct
+ DmTaskSetAddNode = dmTaskSetAddNodeFct
+ DmTaskSetCookie = dmTaskSetCookieFct
+ DmTaskSetMessage = dmTaskSetMessageFct
+ DmTaskSetName = dmTaskSetNameFct
+ DmTaskSetRo = dmTaskSetRoFct
+ DmTaskSetSector = dmTaskSetSectorFct
+ DmUdevWait = dmUdevWaitFct
+ DmUdevSetSyncSupport = dmUdevSetSyncSupportFct
+ DmUdevGetSyncSupport = dmUdevGetSyncSupportFct
+ DmCookieSupported = dmCookieSupportedFct
+ LogWithErrnoInit = logWithErrnoInitFct
+ DmTaskDeferredRemove = dmTaskDeferredRemoveFct
+ DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct
)
func free(p *C.char) {
diff --git a/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
new file mode 100644
index 0000000000..ced482c965
--- /dev/null
+++ b/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
@@ -0,0 +1,33 @@
+// +build linux,!libdm_no_deferred_remove
+
+package devicemapper
+
+/*
+#cgo LDFLAGS: -L. -ldevmapper
+#include <libdevmapper.h>
+*/
+import "C"
+
+const LibraryDeferredRemovalSupport = true
+
+func dmTaskDeferredRemoveFct(task *CDmTask) int {
+ return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task)))
+}
+
+func dmTaskGetInfoWithDeferredFct(task *CDmTask, info *Info) int {
+ Cinfo := C.struct_dm_info{}
+ defer func() {
+ info.Exists = int(Cinfo.exists)
+ info.Suspended = int(Cinfo.suspended)
+ info.LiveTable = int(Cinfo.live_table)
+ info.InactiveTable = int(Cinfo.inactive_table)
+ info.OpenCount = int32(Cinfo.open_count)
+ info.EventNr = uint32(Cinfo.event_nr)
+ info.Major = uint32(Cinfo.major)
+ info.Minor = uint32(Cinfo.minor)
+ info.ReadOnly = int(Cinfo.read_only)
+ info.TargetCount = int32(Cinfo.target_count)
+ info.DeferredRemove = int(Cinfo.deferred_remove)
+ }()
+ return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
+}
diff --git a/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
new file mode 100644
index 0000000000..16631bf19c
--- /dev/null
+++ b/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
@@ -0,0 +1,14 @@
+// +build linux,libdm_no_deferred_remove
+
+package devicemapper
+
+const LibraryDeferredRemovalSupport = false
+
+func dmTaskDeferredRemoveFct(task *CDmTask) int {
+ // Error. Nobody should be calling it.
+ return -1
+}
+
+func dmTaskGetInfoWithDeferredFct(task *CDmTask, info *Info) int {
+ return -1
+}
diff --git a/pkg/ioutils/readers_test.go b/pkg/ioutils/readers_test.go
index 0af978e068..d220487ad5 100644
--- a/pkg/ioutils/readers_test.go
+++ b/pkg/ioutils/readers_test.go
@@ -2,11 +2,91 @@ package ioutils
import (
"bytes"
+ "fmt"
"io"
"io/ioutil"
+ "strings"
"testing"
)
+// Implement io.Reader
+type errorReader struct{}
+
+func (r *errorReader) Read(p []byte) (int, error) {
+ return 0, fmt.Errorf("Error reader always fail.")
+}
+
+func TestReadCloserWrapperClose(t *testing.T) {
+ reader := strings.NewReader("A string reader")
+ wrapper := NewReadCloserWrapper(reader, func() error {
+ return fmt.Errorf("This will be called when closing")
+ })
+ err := wrapper.Close()
+ if err == nil || !strings.Contains(err.Error(), "This will be called when closing") {
+ t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.")
+ }
+}
+
+func TestReaderErrWrapperReadOnError(t *testing.T) {
+ called := false
+ reader := &errorReader{}
+ wrapper := NewReaderErrWrapper(reader, func() {
+ called = true
+ })
+ _, err := wrapper.Read([]byte{})
+ if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") {
+ t.Fatalf("readErrWrapper should returned an error")
+ }
+ if !called {
+ t.Fatalf("readErrWrapper should have call the anonymous function on failure")
+ }
+}
+
+func TestReaderErrWrapperRead(t *testing.T) {
+ reader := strings.NewReader("a string reader.")
+ wrapper := NewReaderErrWrapper(reader, func() {
+ t.Fatalf("readErrWrapper should not have called the anonymous function on failure")
+ })
+	// Read 20 bytes (should be ok with the string above)
+ num, err := wrapper.Read(make([]byte, 20))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if num != 16 {
+ t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num)
+ }
+}
+
+func TestNewBufReaderWithDrainbufAndBuffer(t *testing.T) {
+ reader, writer := io.Pipe()
+
+ drainBuffer := make([]byte, 1024)
+ buffer := bytes.Buffer{}
+ bufreader := NewBufReaderWithDrainbufAndBuffer(reader, drainBuffer, &buffer)
+
+ // Write everything down to a Pipe
+ // Usually, a pipe should block but because of the buffered reader,
+ // the writes will go through
+ done := make(chan bool)
+ go func() {
+ writer.Write([]byte("hello world"))
+ writer.Close()
+ done <- true
+ }()
+
+ // Drain the reader *after* everything has been written, just to verify
+ // it is indeed buffering
+ <-done
+
+ output, err := ioutil.ReadAll(bufreader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(output, []byte("hello world")) {
+ t.Error(string(output))
+ }
+}
+
func TestBufReader(t *testing.T) {
reader, writer := io.Pipe()
bufreader := NewBufReader(reader)
@@ -33,6 +113,50 @@ func TestBufReader(t *testing.T) {
}
}
+func TestBufReaderCloseWithNonReaderCloser(t *testing.T) {
+ reader := strings.NewReader("buffer")
+ bufreader := NewBufReader(reader)
+
+ if err := bufreader.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+}
+
+// implements io.ReadCloser
+type simpleReaderCloser struct{}
+
+func (r *simpleReaderCloser) Read(p []byte) (n int, err error) {
+ return 0, nil
+}
+
+func (r *simpleReaderCloser) Close() error {
+ return nil
+}
+
+func TestBufReaderCloseWithReaderCloser(t *testing.T) {
+ reader := &simpleReaderCloser{}
+ bufreader := NewBufReader(reader)
+
+ err := bufreader.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+}
+
+func TestHashData(t *testing.T) {
+ reader := strings.NewReader("hash-me")
+ actual, err := HashData(reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa"
+ if actual != expected {
+ t.Fatalf("Expecting %s, got %s", expected, actual)
+ }
+}
+
type repeatedReader struct {
readCount int
maxReads int
diff --git a/pkg/ioutils/writeflusher.go b/pkg/ioutils/writeflusher.go
new file mode 100644
index 0000000000..25095474df
--- /dev/null
+++ b/pkg/ioutils/writeflusher.go
@@ -0,0 +1,47 @@
+package ioutils
+
+import (
+ "io"
+ "net/http"
+ "sync"
+)
+
+type WriteFlusher struct {
+ sync.Mutex
+ w io.Writer
+ flusher http.Flusher
+ flushed bool
+}
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+ wf.Lock()
+ defer wf.Unlock()
+ n, err = wf.w.Write(b)
+ wf.flushed = true
+ wf.flusher.Flush()
+ return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+ wf.Lock()
+ defer wf.Unlock()
+ wf.flushed = true
+ wf.flusher.Flush()
+}
+
+func (wf *WriteFlusher) Flushed() bool {
+ wf.Lock()
+ defer wf.Unlock()
+ return wf.flushed
+}
+
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+ var flusher http.Flusher
+ if f, ok := w.(http.Flusher); ok {
+ flusher = f
+ } else {
+ flusher = &NopFlusher{}
+ }
+ return &WriteFlusher{w: w, flusher: flusher}
+}
diff --git a/pkg/ioutils/writers_test.go b/pkg/ioutils/writers_test.go
index 80d7f7f795..564b1cd4f5 100644
--- a/pkg/ioutils/writers_test.go
+++ b/pkg/ioutils/writers_test.go
@@ -6,6 +6,30 @@ import (
"testing"
)
+func TestWriteCloserWrapperClose(t *testing.T) {
+ called := false
+ writer := bytes.NewBuffer([]byte{})
+ wrapper := NewWriteCloserWrapper(writer, func() error {
+ called = true
+ return nil
+ })
+ if err := wrapper.Close(); err != nil {
+ t.Fatal(err)
+ }
+ if !called {
+ t.Fatalf("writeCloserWrapper should have call the anonymous function.")
+ }
+}
+
+func TestNopWriteCloser(t *testing.T) {
+ writer := bytes.NewBuffer([]byte{})
+ wrapper := NopWriteCloser(writer)
+ if err := wrapper.Close(); err != nil {
+ t.Fatal("NopWriteCloser always return nil on Close.")
+ }
+
+}
+
func TestNopWriter(t *testing.T) {
nw := &NopWriter{}
l, err := nw.Write([]byte{'c'})
diff --git a/pkg/jsonlog/jsonlog.go b/pkg/jsonlog/jsonlog.go
index 261c64cdc3..85afb3b503 100644
--- a/pkg/jsonlog/jsonlog.go
+++ b/pkg/jsonlog/jsonlog.go
@@ -5,8 +5,6 @@ import (
"fmt"
"io"
"time"
-
- "github.com/Sirupsen/logrus"
)
type JSONLog struct {
@@ -32,16 +30,21 @@ func (jl *JSONLog) Reset() {
jl.Created = time.Time{}
}
-func WriteLog(src io.Reader, dst io.Writer, format string) error {
+func WriteLog(src io.Reader, dst io.Writer, format string, since time.Time) error {
dec := json.NewDecoder(src)
l := &JSONLog{}
for {
- if err := dec.Decode(l); err == io.EOF {
- return nil
- } else if err != nil {
- logrus.Printf("Error streaming logs: %s", err)
+ l.Reset()
+ if err := dec.Decode(l); err != nil {
+ if err == io.EOF {
+ return nil
+ }
return err
}
+ if !since.IsZero() && l.Created.Before(since) {
+ continue
+ }
+
line, err := l.Format(format)
if err != nil {
return err
@@ -49,6 +52,5 @@ func WriteLog(src io.Reader, dst io.Writer, format string) error {
if _, err := io.WriteString(dst, line); err != nil {
return err
}
- l.Reset()
}
}
diff --git a/pkg/jsonlog/jsonlog_test.go b/pkg/jsonlog/jsonlog_test.go
index fa53825b93..d4b26fcb43 100644
--- a/pkg/jsonlog/jsonlog_test.go
+++ b/pkg/jsonlog/jsonlog_test.go
@@ -21,7 +21,7 @@ func TestWriteLog(t *testing.T) {
}
w := bytes.NewBuffer(nil)
format := timeutils.RFC3339NanoFixed
- if err := WriteLog(&buf, w, format); err != nil {
+ if err := WriteLog(&buf, w, format, time.Time{}); err != nil {
t.Fatal(err)
}
res := w.String()
@@ -52,7 +52,7 @@ func BenchmarkWriteLog(b *testing.B) {
b.SetBytes(int64(r.Len()))
b.ResetTimer()
for i := 0; i < b.N; i++ {
- if err := WriteLog(r, w, format); err != nil {
+ if err := WriteLog(r, w, format, time.Time{}); err != nil {
b.Fatal(err)
}
b.StopTimer()
diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go
index f0d20d99b0..ce003cd074 100644
--- a/pkg/mflag/flag.go
+++ b/pkg/mflag/flag.go
@@ -560,7 +560,7 @@ func defaultUsage(f *FlagSet) {
// Usage prints to standard error a usage message documenting all defined command-line flags.
// The function is a variable that may be changed to point to a custom function.
var Usage = func() {
- fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0])
+ fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0])
PrintDefaults()
}
@@ -1085,7 +1085,7 @@ func (cmd *FlagSet) ReportError(str string, withHelp bool) {
str += ". See '" + os.Args[0] + " " + cmd.Name() + " --help'"
}
}
- fmt.Fprintf(cmd.Out(), "docker: %s.\n", str)
+ fmt.Fprintf(cmd.Out(), "docker: %s\n", str)
os.Exit(1)
}
diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go
index 1087ece992..b081cc7db3 100644
--- a/pkg/namesgenerator/names-generator.go
+++ b/pkg/namesgenerator/names-generator.go
@@ -3,7 +3,8 @@ package namesgenerator
import (
"fmt"
"math/rand"
- "time"
+
+ "github.com/docker/docker/pkg/random"
)
var (
@@ -309,7 +310,7 @@ var (
"yonath",
}
- rnd = rand.New(rand.NewSource(time.Now().UnixNano()))
+ rnd = rand.New(random.NewSource())
)
func GetRandomName(retry int) string {
diff --git a/pkg/parsers/kernel/kernel.go b/pkg/parsers/kernel/kernel.go
index 70d09003a3..5f7930684a 100644
--- a/pkg/parsers/kernel/kernel.go
+++ b/pkg/parsers/kernel/kernel.go
@@ -1,3 +1,5 @@
+// +build !windows
+
package kernel
import (
diff --git a/pkg/parsers/kernel/kernel_windows.go b/pkg/parsers/kernel/kernel_windows.go
new file mode 100644
index 0000000000..399d63e5f0
--- /dev/null
+++ b/pkg/parsers/kernel/kernel_windows.go
@@ -0,0 +1,65 @@
+package kernel
+
+import (
+ "fmt"
+ "syscall"
+ "unsafe"
+)
+
+type KernelVersionInfo struct {
+ kvi string
+ major int
+ minor int
+ build int
+}
+
+func (k *KernelVersionInfo) String() string {
+ return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi)
+}
+
+func GetKernelVersion() (*KernelVersionInfo, error) {
+
+ var (
+ h syscall.Handle
+ dwVersion uint32
+ err error
+ )
+
+ KVI := &KernelVersionInfo{"Unknown", 0, 0, 0}
+
+ if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,
+ syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
+ 0,
+ syscall.KEY_READ,
+ &h); err != nil {
+ return KVI, err
+ }
+ defer syscall.RegCloseKey(h)
+
+ var buf [1 << 10]uint16
+ var typ uint32
+ n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
+
+ if err = syscall.RegQueryValueEx(h,
+ syscall.StringToUTF16Ptr("BuildLabEx"),
+ nil,
+ &typ,
+ (*byte)(unsafe.Pointer(&buf[0])),
+ &n); err != nil {
+ return KVI, err
+ }
+
+ KVI.kvi = syscall.UTF16ToString(buf[:])
+
+ // Important - docker.exe MUST be manifested for this API to return
+ // the correct information.
+ if dwVersion, err = syscall.GetVersion(); err != nil {
+ return KVI, err
+ }
+
+ KVI.major = int(dwVersion & 0xFF)
+ KVI.minor = int((dwVersion & 0XFF00) >> 8)
+ KVI.build = int((dwVersion & 0xFFFF0000) >> 16)
+
+ return KVI, nil
+}
diff --git a/pkg/parsers/operatingsystem/operatingsystem.go b/pkg/parsers/operatingsystem/operatingsystem_linux.go
similarity index 100%
rename from pkg/parsers/operatingsystem/operatingsystem.go
rename to pkg/parsers/operatingsystem/operatingsystem_linux.go
diff --git a/pkg/parsers/operatingsystem/operatingsystem_windows.go b/pkg/parsers/operatingsystem/operatingsystem_windows.go
new file mode 100644
index 0000000000..c843c6f849
--- /dev/null
+++ b/pkg/parsers/operatingsystem/operatingsystem_windows.go
@@ -0,0 +1,47 @@
+package operatingsystem
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c
+// for a similar sample
+
+func GetOperatingSystem() (string, error) {
+
+ var h syscall.Handle
+
+ // Default return value
+ ret := "Unknown Operating System"
+
+ if err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,
+ syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
+ 0,
+ syscall.KEY_READ,
+ &h); err != nil {
+ return ret, err
+ }
+ defer syscall.RegCloseKey(h)
+
+ var buf [1 << 10]uint16
+ var typ uint32
+ n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
+
+ if err := syscall.RegQueryValueEx(h,
+ syscall.StringToUTF16Ptr("ProductName"),
+ nil,
+ &typ,
+ (*byte)(unsafe.Pointer(&buf[0])),
+ &n); err != nil {
+ return ret, err
+ }
+ ret = syscall.UTF16ToString(buf[:])
+
+ return ret, nil
+}
+
+// No-op on Windows
+func IsContainerized() (bool, error) {
+ return false, nil
+}
diff --git a/pkg/parsers/parsers.go b/pkg/parsers/parsers.go
index 59e294dc22..32d87734f8 100644
--- a/pkg/parsers/parsers.go
+++ b/pkg/parsers/parsers.go
@@ -2,6 +2,7 @@ package parsers
import (
"fmt"
+ "runtime"
"strconv"
"strings"
)
@@ -10,7 +11,12 @@ import (
func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) {
addr = strings.TrimSpace(addr)
if addr == "" {
- addr = fmt.Sprintf("unix://%s", defaultUnixAddr)
+ if runtime.GOOS != "windows" {
+ addr = fmt.Sprintf("unix://%s", defaultUnixAddr)
+ } else {
+ // Note - defaultTCPAddr already includes tcp:// prefix
+ addr = fmt.Sprintf("%s", defaultTCPAddr)
+ }
}
addrParts := strings.Split(addr, "://")
if len(addrParts) == 1 {
@@ -135,3 +141,17 @@ func ParsePortRange(ports string) (uint64, uint64, error) {
}
return start, end, nil
}
+
+func ParseLink(val string) (string, string, error) {
+ if val == "" {
+ return "", "", fmt.Errorf("empty string specified for links")
+ }
+ arr := strings.Split(val, ":")
+ if len(arr) > 2 {
+ return "", "", fmt.Errorf("bad format for links: %s", val)
+ }
+ if len(arr) == 1 {
+ return val, val, nil
+ }
+ return arr[0], arr[1], nil
+}
diff --git a/pkg/parsers/parsers_test.go b/pkg/parsers/parsers_test.go
index bc9a1e943c..89f4ae023a 100644
--- a/pkg/parsers/parsers_test.go
+++ b/pkg/parsers/parsers_test.go
@@ -123,3 +123,35 @@ func TestParsePortRangeIncorrectStartRange(t *testing.T) {
t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
}
}
+
+func TestParseLink(t *testing.T) {
+ name, alias, err := ParseLink("name:alias")
+ if err != nil {
+ t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err)
+ }
+ if name != "name" {
+ t.Fatalf("Link name should have been name, got %s instead", name)
+ }
+ if alias != "alias" {
+ t.Fatalf("Link alias should have been alias, got %s instead", alias)
+ }
+ // short format definition
+ name, alias, err = ParseLink("name")
+ if err != nil {
+ t.Fatalf("Expected not to error out on a valid name only format but got: %v", err)
+ }
+ if name != "name" {
+ t.Fatalf("Link name should have been name, got %s instead", name)
+ }
+ if alias != "name" {
+ t.Fatalf("Link alias should have been name, got %s instead", alias)
+ }
+ // empty string link definition is not allowed
+ if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") {
+ t.Fatalf("Expected error 'empty string specified for links' but got: %v", err)
+ }
+ // more than two colons are not allowed
+ if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") {
+ t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err)
+ }
+}
diff --git a/pkg/pidfile/pidfile.go b/pkg/pidfile/pidfile.go
index 21a5438799..3e57073658 100644
--- a/pkg/pidfile/pidfile.go
+++ b/pkg/pidfile/pidfile.go
@@ -3,7 +3,6 @@ package pidfile
import (
"fmt"
"io/ioutil"
- "log"
"os"
"path/filepath"
"strconv"
@@ -24,20 +23,19 @@ func checkPidFileAlreadyExists(path string) error {
return nil
}
-func New(path string) (file *PidFile, err error) {
+func New(path string) (*PidFile, error) {
if err := checkPidFileAlreadyExists(path); err != nil {
return nil, err
}
+ if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil {
+ return nil, err
+ }
- file = &PidFile{path: path}
- err = ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644)
-
- return file, err
+ return &PidFile{path: path}, nil
}
func (file PidFile) Remove() error {
if err := os.Remove(file.path); err != nil {
- log.Printf("Error removing %s: %s", file.path, err)
return err
}
return nil
diff --git a/pkg/plugins/client.go b/pkg/plugins/client.go
new file mode 100644
index 0000000000..d531fa46fb
--- /dev/null
+++ b/pkg/plugins/client.go
@@ -0,0 +1,112 @@
+package plugins
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+)
+
+const (
+ versionMimetype = "application/vnd.docker.plugins.v1+json"
+ defaultTimeOut = 30
+)
+
+func NewClient(addr string) *Client {
+ tr := &http.Transport{}
+ protoAndAddr := strings.Split(addr, "://")
+ configureTCPTransport(tr, protoAndAddr[0], protoAndAddr[1])
+ return &Client{&http.Client{Transport: tr}, protoAndAddr[1]}
+}
+
+type Client struct {
+ http *http.Client
+ addr string
+}
+
+func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error {
+ return c.callWithRetry(serviceMethod, args, ret, true)
+}
+
+func (c *Client) callWithRetry(serviceMethod string, args interface{}, ret interface{}, retry bool) error {
+ var buf bytes.Buffer
+ if err := json.NewEncoder(&buf).Encode(args); err != nil {
+ return err
+ }
+
+ req, err := http.NewRequest("POST", "/"+serviceMethod, &buf)
+ if err != nil {
+ return err
+ }
+ req.Header.Add("Accept", versionMimetype)
+ req.URL.Scheme = "http"
+ req.URL.Host = c.addr
+
+ var retries int
+ start := time.Now()
+
+ for {
+ resp, err := c.http.Do(req)
+ if err != nil {
+ if !retry {
+ return err
+ }
+
+ timeOff := backoff(retries)
+ if abort(start, timeOff) {
+ return err
+ }
+ retries++
+ logrus.Warnf("Unable to connect to plugin: %s, retrying in %v", c.addr, timeOff)
+ time.Sleep(timeOff)
+ continue
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ remoteErr, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil
+ }
+ return fmt.Errorf("Plugin Error: %s", remoteErr)
+ }
+
+ return json.NewDecoder(resp.Body).Decode(&ret)
+ }
+}
+
+func backoff(retries int) time.Duration {
+ b, max := 1, defaultTimeOut
+ for b < max && retries > 0 {
+ b *= 2
+ retries--
+ }
+ if b > max {
+ b = max
+ }
+ return time.Duration(b) * time.Second
+}
+
+func abort(start time.Time, timeOff time.Duration) bool {
+ return timeOff+time.Since(start) > time.Duration(defaultTimeOut)*time.Second
+}
+
+func configureTCPTransport(tr *http.Transport, proto, addr string) {
+ // Why 32? See https://github.com/docker/docker/pull/8035.
+ timeout := 32 * time.Second
+ if proto == "unix" {
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ tr.Dial = func(_, _ string) (net.Conn, error) {
+ return net.DialTimeout(proto, addr, timeout)
+ }
+ } else {
+ tr.Proxy = http.ProxyFromEnvironment
+ tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
+ }
+}
diff --git a/pkg/plugins/client_test.go b/pkg/plugins/client_test.go
new file mode 100644
index 0000000000..0f7cd34dfa
--- /dev/null
+++ b/pkg/plugins/client_test.go
@@ -0,0 +1,105 @@
+package plugins
+
+import (
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "testing"
+ "time"
+)
+
+var (
+ mux *http.ServeMux
+ server *httptest.Server
+)
+
+func setupRemotePluginServer() string {
+ mux = http.NewServeMux()
+ server = httptest.NewServer(mux)
+ return server.URL
+}
+
+func teardownRemotePluginServer() {
+ if server != nil {
+ server.Close()
+ }
+}
+
+func TestFailedConnection(t *testing.T) {
+ c := NewClient("tcp://127.0.0.1:1")
+ err := c.callWithRetry("Service.Method", nil, nil, false)
+ if err == nil {
+ t.Fatal("Unexpected successful connection")
+ }
+}
+
+func TestEchoInputOutput(t *testing.T) {
+ addr := setupRemotePluginServer()
+ defer teardownRemotePluginServer()
+
+ m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}}
+
+ mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "POST" {
+ t.Fatalf("Expected POST, got %s\n", r.Method)
+ }
+
+ header := w.Header()
+ header.Set("Content-Type", versionMimetype)
+
+ io.Copy(w, r.Body)
+ })
+
+ c := NewClient(addr)
+ var output Manifest
+ err := c.Call("Test.Echo", m, &output)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(output, m) {
+ t.Fatalf("Expected %v, was %v\n", m, output)
+ }
+}
+
+func TestBackoff(t *testing.T) {
+ cases := []struct {
+ retries int
+ expTimeOff time.Duration
+ }{
+ {0, time.Duration(1)},
+ {1, time.Duration(2)},
+ {2, time.Duration(4)},
+ {4, time.Duration(16)},
+ {6, time.Duration(30)},
+ {10, time.Duration(30)},
+ }
+
+ for _, c := range cases {
+ s := c.expTimeOff * time.Second
+ if d := backoff(c.retries); d != s {
+ t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d)
+ }
+ }
+}
+
+func TestAbortRetry(t *testing.T) {
+ cases := []struct {
+ timeOff time.Duration
+ expAbort bool
+ }{
+ {time.Duration(1), false},
+ {time.Duration(2), false},
+ {time.Duration(10), false},
+ {time.Duration(30), true},
+ {time.Duration(40), true},
+ }
+
+ for _, c := range cases {
+ s := c.timeOff * time.Second
+ if a := abort(time.Now(), s); a != c.expAbort {
+ t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a)
+ }
+ }
+}
diff --git a/pkg/plugins/discovery.go b/pkg/plugins/discovery.go
new file mode 100644
index 0000000000..3a42ba6d17
--- /dev/null
+++ b/pkg/plugins/discovery.go
@@ -0,0 +1,78 @@
+package plugins
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+const defaultLocalRegistry = "/usr/share/docker/plugins"
+
+var (
+ ErrNotFound = errors.New("Plugin not found")
+)
+
+type Registry interface {
+ Plugins() ([]*Plugin, error)
+ Plugin(name string) (*Plugin, error)
+}
+
+type LocalRegistry struct {
+ path string
+}
+
+func newLocalRegistry(path string) *LocalRegistry {
+ if len(path) == 0 {
+ path = defaultLocalRegistry
+ }
+
+ return &LocalRegistry{path}
+}
+
+func (l *LocalRegistry) Plugin(name string) (*Plugin, error) {
+ filepath := filepath.Join(l.path, name)
+ specpath := filepath + ".spec"
+ if fi, err := os.Stat(specpath); err == nil {
+ return readPluginInfo(specpath, fi)
+ }
+ socketpath := filepath + ".sock"
+ if fi, err := os.Stat(socketpath); err == nil {
+ return readPluginInfo(socketpath, fi)
+ }
+ return nil, ErrNotFound
+}
+
+func readPluginInfo(path string, fi os.FileInfo) (*Plugin, error) {
+ name := strings.Split(fi.Name(), ".")[0]
+
+ if fi.Mode()&os.ModeSocket != 0 {
+ return &Plugin{
+ Name: name,
+ Addr: "unix://" + path,
+ }, nil
+ }
+
+ content, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ addr := strings.TrimSpace(string(content))
+
+ u, err := url.Parse(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(u.Scheme) == 0 {
+ return nil, fmt.Errorf("Unknown protocol")
+ }
+
+ return &Plugin{
+ Name: name,
+ Addr: addr,
+ }, nil
+}
diff --git a/pkg/plugins/discovery_test.go b/pkg/plugins/discovery_test.go
new file mode 100644
index 0000000000..b6e66e289c
--- /dev/null
+++ b/pkg/plugins/discovery_test.go
@@ -0,0 +1,108 @@
+package plugins
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "testing"
+)
+
+func TestUnknownLocalPath(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "docker-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ l := newLocalRegistry(filepath.Join(tmpdir, "unknown"))
+ _, err = l.Plugin("foo")
+ if err == nil || err != ErrNotFound {
+ t.Fatalf("Expected error for unknown directory")
+ }
+}
+
+func TestLocalSocket(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "docker-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ l, err := net.Listen("unix", filepath.Join(tmpdir, "echo.sock"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer l.Close()
+
+ r := newLocalRegistry(tmpdir)
+ p, err := r.Plugin("echo")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pp, err := r.Plugin("echo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(p, pp) {
+ t.Fatalf("Expected %v, was %v\n", p, pp)
+ }
+
+ if p.Name != "echo" {
+ t.Fatalf("Expected plugin `echo`, got %s\n", p.Name)
+ }
+
+ addr := fmt.Sprintf("unix://%s/echo.sock", tmpdir)
+ if p.Addr != addr {
+ t.Fatalf("Expected plugin addr `%s`, got %s\n", addr, p.Addr)
+ }
+}
+
+func TestFileSpecPlugin(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "docker-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cases := []struct {
+ path string
+ name string
+ addr string
+ fail bool
+ }{
+ {filepath.Join(tmpdir, "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false},
+ {filepath.Join(tmpdir, "foo.spec"), "foo", "tcp://localhost:8080", false},
+ {filepath.Join(tmpdir, "bar.spec"), "bar", "localhost:8080", true}, // unknown transport
+ }
+
+ for _, c := range cases {
+ if err = os.MkdirAll(path.Dir(c.path), 0755); err != nil {
+ t.Fatal(err)
+ }
+ if err = ioutil.WriteFile(c.path, []byte(c.addr), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ r := newLocalRegistry(tmpdir)
+ p, err := r.Plugin(c.name)
+ if c.fail && err == nil {
+ continue
+ }
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if p.Name != c.name {
+ t.Fatalf("Expected plugin `%s`, got %s\n", c.name, p.Name)
+ }
+
+ if p.Addr != c.addr {
+ t.Fatalf("Expected plugin addr `%s`, got %s\n", c.addr, p.Addr)
+ }
+ os.Remove(c.path)
+ }
+}
diff --git a/pkg/plugins/plugins.go b/pkg/plugins/plugins.go
new file mode 100644
index 0000000000..47519486bd
--- /dev/null
+++ b/pkg/plugins/plugins.go
@@ -0,0 +1,100 @@
+package plugins
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/Sirupsen/logrus"
+)
+
+var (
+ ErrNotImplements = errors.New("Plugin does not implement the requested driver")
+)
+
+type plugins struct {
+ sync.Mutex
+ plugins map[string]*Plugin
+}
+
+var (
+ storage = plugins{plugins: make(map[string]*Plugin)}
+ extpointHandlers = make(map[string]func(string, *Client))
+)
+
+type Manifest struct {
+ Implements []string
+}
+
+type Plugin struct {
+ Name string
+ Addr string
+ Client *Client
+ Manifest *Manifest
+}
+
+func (p *Plugin) activate() error {
+ m := new(Manifest)
+ p.Client = NewClient(p.Addr)
+ err := p.Client.Call("Plugin.Activate", nil, m)
+ if err != nil {
+ return err
+ }
+
+ logrus.Debugf("%s's manifest: %v", p.Name, m)
+ p.Manifest = m
+ for _, iface := range m.Implements {
+ handler, handled := extpointHandlers[iface]
+ if !handled {
+ continue
+ }
+ handler(p.Name, p.Client)
+ }
+ return nil
+}
+
+func load(name string) (*Plugin, error) {
+ registry := newLocalRegistry("")
+ pl, err := registry.Plugin(name)
+ if err != nil {
+ return nil, err
+ }
+ if err := pl.activate(); err != nil {
+ return nil, err
+ }
+ return pl, nil
+}
+
+func get(name string) (*Plugin, error) {
+ storage.Lock()
+ defer storage.Unlock()
+ pl, ok := storage.plugins[name]
+ if ok {
+ return pl, nil
+ }
+ pl, err := load(name)
+ if err != nil {
+ return nil, err
+ }
+
+ logrus.Debugf("Plugin: %v", pl)
+ storage.plugins[name] = pl
+ return pl, nil
+}
+
+func Get(name, imp string) (*Plugin, error) {
+ pl, err := get(name)
+ if err != nil {
+ return nil, err
+ }
+ for _, driver := range pl.Manifest.Implements {
+ logrus.Debugf("%s implements: %s", name, driver)
+ if driver == imp {
+ return pl, nil
+ }
+ }
+ return nil, ErrNotImplements
+}
+
+func Handle(iface string, fn func(string, *Client)) {
+ extpointHandlers[iface] = fn
+}
diff --git a/pkg/random/random.go b/pkg/random/random.go
new file mode 100644
index 0000000000..05b7f7fb37
--- /dev/null
+++ b/pkg/random/random.go
@@ -0,0 +1,34 @@
+package random
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+)
+
+// copypaste from standard math/rand
+type lockedSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
+
+// NewSource returns math/rand.Source safe for concurrent use and initialized
+// with current unix-nano timestamp
+func NewSource() rand.Source {
+ return &lockedSource{
+ src: rand.NewSource(time.Now().UnixNano()),
+ }
+}
diff --git a/pkg/random/random_test.go b/pkg/random/random_test.go
new file mode 100644
index 0000000000..cf405f78cb
--- /dev/null
+++ b/pkg/random/random_test.go
@@ -0,0 +1,22 @@
+package random
+
+import (
+ "math/rand"
+ "sync"
+ "testing"
+)
+
+// for go test -v -race
+func TestConcurrency(t *testing.T) {
+ rnd := rand.New(NewSource())
+ var wg sync.WaitGroup
+
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ rnd.Int63()
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
diff --git a/pkg/reexec/command_unsupported.go b/pkg/reexec/command_unsupported.go
index a579318e82..4adcd8f13e 100644
--- a/pkg/reexec/command_unsupported.go
+++ b/pkg/reexec/command_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+// +build !linux,!windows
package reexec
diff --git a/pkg/reexec/command_windows.go b/pkg/reexec/command_windows.go
new file mode 100644
index 0000000000..124d42fc62
--- /dev/null
+++ b/pkg/reexec/command_windows.go
@@ -0,0 +1,14 @@
+// +build windows
+
+package reexec
+
+import (
+ "os/exec"
+)
+
+func Command(args ...string) *exec.Cmd {
+ return &exec.Cmd{
+ Path: Self(),
+ Args: args,
+ }
+}
diff --git a/pkg/requestdecorator/README.md b/pkg/requestdecorator/README.md
deleted file mode 100644
index 76f8ca798f..0000000000
--- a/pkg/requestdecorator/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-This package provides helper functions for decorating a request with user agent
-versions, auth, meta headers.
diff --git a/pkg/requestdecorator/requestdecorator.go b/pkg/requestdecorator/requestdecorator.go
deleted file mode 100644
index c236e3fe3f..0000000000
--- a/pkg/requestdecorator/requestdecorator.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Package requestdecorator provides helper functions to decorate a request with
-// user agent versions, auth, meta headers.
-package requestdecorator
-
-import (
- "errors"
- "io"
- "net/http"
- "strings"
-
- "github.com/Sirupsen/logrus"
-)
-
-var (
- ErrNilRequest = errors.New("request cannot be nil")
-)
-
-// UAVersionInfo is used to model UserAgent versions.
-type UAVersionInfo struct {
- Name string
- Version string
-}
-
-func NewUAVersionInfo(name, version string) UAVersionInfo {
- return UAVersionInfo{
- Name: name,
- Version: version,
- }
-}
-
-func (vi *UAVersionInfo) isValid() bool {
- const stopChars = " \t\r\n/"
- name := vi.Name
- vers := vi.Version
- if len(name) == 0 || strings.ContainsAny(name, stopChars) {
- return false
- }
- if len(vers) == 0 || strings.ContainsAny(vers, stopChars) {
- return false
- }
- return true
-}
-
-// Convert versions to a string and append the string to the string base.
-//
-// Each UAVersionInfo will be converted to a string in the format of
-// "product/version", where the "product" is get from the name field, while
-// version is get from the version field. Several pieces of verson information
-// will be concatinated and separated by space.
-func appendVersions(base string, versions ...UAVersionInfo) string {
- if len(versions) == 0 {
- return base
- }
-
- verstrs := make([]string, 0, 1+len(versions))
- if len(base) > 0 {
- verstrs = append(verstrs, base)
- }
-
- for _, v := range versions {
- if !v.isValid() {
- continue
- }
- verstrs = append(verstrs, v.Name+"/"+v.Version)
- }
- return strings.Join(verstrs, " ")
-}
-
-// Decorator is used to change an instance of
-// http.Request. It could be used to add more header fields,
-// change body, etc.
-type Decorator interface {
- // ChangeRequest() changes the request accordingly.
- // The changed request will be returned or err will be non-nil
- // if an error occur.
- ChangeRequest(req *http.Request) (newReq *http.Request, err error)
-}
-
-// UserAgentDecorator appends the product/version to the user agent field
-// of a request.
-type UserAgentDecorator struct {
- Versions []UAVersionInfo
-}
-
-func (h *UserAgentDecorator) ChangeRequest(req *http.Request) (*http.Request, error) {
- if req == nil {
- return req, ErrNilRequest
- }
-
- userAgent := appendVersions(req.UserAgent(), h.Versions...)
- if len(userAgent) > 0 {
- req.Header.Set("User-Agent", userAgent)
- }
- return req, nil
-}
-
-type MetaHeadersDecorator struct {
- Headers map[string][]string
-}
-
-func (h *MetaHeadersDecorator) ChangeRequest(req *http.Request) (*http.Request, error) {
- if h.Headers == nil {
- return req, ErrNilRequest
- }
- for k, v := range h.Headers {
- req.Header[k] = v
- }
- return req, nil
-}
-
-type AuthDecorator struct {
- login string
- password string
-}
-
-func NewAuthDecorator(login, password string) Decorator {
- return &AuthDecorator{
- login: login,
- password: password,
- }
-}
-
-func (self *AuthDecorator) ChangeRequest(req *http.Request) (*http.Request, error) {
- if req == nil {
- return req, ErrNilRequest
- }
- req.SetBasicAuth(self.login, self.password)
- return req, nil
-}
-
-// RequestFactory creates an HTTP request
-// and applies a list of decorators on the request.
-type RequestFactory struct {
- decorators []Decorator
-}
-
-func NewRequestFactory(d ...Decorator) *RequestFactory {
- return &RequestFactory{
- decorators: d,
- }
-}
-
-func (f *RequestFactory) AddDecorator(d ...Decorator) {
- f.decorators = append(f.decorators, d...)
-}
-
-func (f *RequestFactory) GetDecorators() []Decorator {
- return f.decorators
-}
-
-// NewRequest() creates a new *http.Request,
-// applies all decorators in the Factory on the request,
-// then applies decorators provided by d on the request.
-func (h *RequestFactory) NewRequest(method, urlStr string, body io.Reader, d ...Decorator) (*http.Request, error) {
- req, err := http.NewRequest(method, urlStr, body)
- if err != nil {
- return nil, err
- }
-
- // By default, a nil factory should work.
- if h == nil {
- return req, nil
- }
- for _, dec := range h.decorators {
- req, _ = dec.ChangeRequest(req)
- }
- for _, dec := range d {
- req, _ = dec.ChangeRequest(req)
- }
- logrus.Debugf("%v -- HEADERS: %v", req.URL, req.Header)
- return req, err
-}
diff --git a/pkg/requestdecorator/requestdecorator_test.go b/pkg/requestdecorator/requestdecorator_test.go
deleted file mode 100644
index ed61135467..0000000000
--- a/pkg/requestdecorator/requestdecorator_test.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package requestdecorator
-
-import (
- "net/http"
- "strings"
- "testing"
-)
-
-func TestUAVersionInfo(t *testing.T) {
- uavi := NewUAVersionInfo("foo", "bar")
- if !uavi.isValid() {
- t.Fatalf("UAVersionInfo should be valid")
- }
- uavi = NewUAVersionInfo("", "bar")
- if uavi.isValid() {
- t.Fatalf("Expected UAVersionInfo to be invalid")
- }
- uavi = NewUAVersionInfo("foo", "")
- if uavi.isValid() {
- t.Fatalf("Expected UAVersionInfo to be invalid")
- }
-}
-
-func TestUserAgentDecorator(t *testing.T) {
- httpVersion := make([]UAVersionInfo, 2)
- httpVersion = append(httpVersion, NewUAVersionInfo("testname", "testversion"))
- httpVersion = append(httpVersion, NewUAVersionInfo("name", "version"))
- uad := &UserAgentDecorator{
- Versions: httpVersion,
- }
-
- req, err := http.NewRequest("GET", "/something", strings.NewReader("test"))
- if err != nil {
- t.Fatal(err)
- }
- reqDecorated, err := uad.ChangeRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- if reqDecorated.Header.Get("User-Agent") != "testname/testversion name/version" {
- t.Fatalf("Request should have User-Agent 'testname/testversion name/version'")
- }
-}
-
-func TestUserAgentDecoratorErr(t *testing.T) {
- httpVersion := make([]UAVersionInfo, 0)
- uad := &UserAgentDecorator{
- Versions: httpVersion,
- }
-
- var req *http.Request
- _, err := uad.ChangeRequest(req)
- if err == nil {
- t.Fatalf("Expected to get ErrNilRequest instead no error was returned")
- }
-}
-
-func TestMetaHeadersDecorator(t *testing.T) {
- var headers = map[string][]string{
- "key1": {"value1"},
- "key2": {"value2"},
- }
- mhd := &MetaHeadersDecorator{
- Headers: headers,
- }
-
- req, err := http.NewRequest("GET", "/something", strings.NewReader("test"))
- if err != nil {
- t.Fatal(err)
- }
- reqDecorated, err := mhd.ChangeRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- v, ok := reqDecorated.Header["key1"]
- if !ok {
- t.Fatalf("Expected to have header key1")
- }
- if v[0] != "value1" {
- t.Fatalf("Expected value for key1 isn't value1")
- }
-
- v, ok = reqDecorated.Header["key2"]
- if !ok {
- t.Fatalf("Expected to have header key2")
- }
- if v[0] != "value2" {
- t.Fatalf("Expected value for key2 isn't value2")
- }
-}
-
-func TestMetaHeadersDecoratorErr(t *testing.T) {
- mhd := &MetaHeadersDecorator{}
-
- var req *http.Request
- _, err := mhd.ChangeRequest(req)
- if err == nil {
- t.Fatalf("Expected to get ErrNilRequest instead no error was returned")
- }
-}
-
-func TestAuthDecorator(t *testing.T) {
- ad := NewAuthDecorator("test", "password")
-
- req, err := http.NewRequest("GET", "/something", strings.NewReader("test"))
- if err != nil {
- t.Fatal(err)
- }
- reqDecorated, err := ad.ChangeRequest(req)
- if err != nil {
- t.Fatal(err)
- }
-
- username, password, ok := reqDecorated.BasicAuth()
- if !ok {
- t.Fatalf("Cannot retrieve basic auth info from request")
- }
- if username != "test" {
- t.Fatalf("Expected username to be test, got %s", username)
- }
- if password != "password" {
- t.Fatalf("Expected password to be password, got %s", password)
- }
-}
-
-func TestAuthDecoratorErr(t *testing.T) {
- ad := &AuthDecorator{}
-
- var req *http.Request
- _, err := ad.ChangeRequest(req)
- if err == nil {
- t.Fatalf("Expected to get ErrNilRequest instead no error was returned")
- }
-}
-
-func TestRequestFactory(t *testing.T) {
- ad := NewAuthDecorator("test", "password")
- httpVersion := make([]UAVersionInfo, 2)
- httpVersion = append(httpVersion, NewUAVersionInfo("testname", "testversion"))
- httpVersion = append(httpVersion, NewUAVersionInfo("name", "version"))
- uad := &UserAgentDecorator{
- Versions: httpVersion,
- }
-
- requestFactory := NewRequestFactory(ad, uad)
-
- if l := len(requestFactory.GetDecorators()); l != 2 {
- t.Fatalf("Expected to have two decorators, got %d", l)
- }
-
- req, err := requestFactory.NewRequest("GET", "/test", strings.NewReader("test"))
- if err != nil {
- t.Fatal(err)
- }
-
- username, password, ok := req.BasicAuth()
- if !ok {
- t.Fatalf("Cannot retrieve basic auth info from request")
- }
- if username != "test" {
- t.Fatalf("Expected username to be test, got %s", username)
- }
- if password != "password" {
- t.Fatalf("Expected password to be password, got %s", password)
- }
- if req.Header.Get("User-Agent") != "testname/testversion name/version" {
- t.Fatalf("Request should have User-Agent 'testname/testversion name/version'")
- }
-}
-
-func TestRequestFactoryNewRequestWithDecorators(t *testing.T) {
- ad := NewAuthDecorator("test", "password")
-
- requestFactory := NewRequestFactory(ad)
-
- if l := len(requestFactory.GetDecorators()); l != 1 {
- t.Fatalf("Expected to have one decorators, got %d", l)
- }
-
- ad2 := NewAuthDecorator("test2", "password2")
-
- req, err := requestFactory.NewRequest("GET", "/test", strings.NewReader("test"), ad2)
- if err != nil {
- t.Fatal(err)
- }
-
- username, password, ok := req.BasicAuth()
- if !ok {
- t.Fatalf("Cannot retrieve basic auth info from request")
- }
- if username != "test2" {
- t.Fatalf("Expected username to be test, got %s", username)
- }
- if password != "password2" {
- t.Fatalf("Expected password to be password, got %s", password)
- }
-}
-
-func TestRequestFactoryAddDecorator(t *testing.T) {
- requestFactory := NewRequestFactory()
-
- if l := len(requestFactory.GetDecorators()); l != 0 {
- t.Fatalf("Expected to have zero decorators, got %d", l)
- }
-
- ad := NewAuthDecorator("test", "password")
- requestFactory.AddDecorator(ad)
-
- if l := len(requestFactory.GetDecorators()); l != 1 {
- t.Fatalf("Expected to have one decorators, got %d", l)
- }
-}
-
-func TestRequestFactoryNil(t *testing.T) {
- var requestFactory RequestFactory
- _, err := requestFactory.NewRequest("GET", "/test", strings.NewReader("test"))
- if err != nil {
- t.Fatalf("Expected not to get and error, got %s", err)
- }
-}
diff --git a/pkg/signal/trap.go b/pkg/signal/trap.go
index 7469dbcc21..3772db5e18 100644
--- a/pkg/signal/trap.go
+++ b/pkg/signal/trap.go
@@ -3,6 +3,7 @@ package signal
import (
"os"
gosignal "os/signal"
+ "runtime"
"sync/atomic"
"syscall"
@@ -14,41 +15,50 @@ import (
// (and the Docker engine in particular).
//
// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
-// * If SIGINT or SIGTERM are repeated 3 times before cleanup is complete, then cleanup is
-// skipped and the process terminated directly.
-// * If "DEBUG" is set in the environment, SIGQUIT causes an exit without cleanup.
+// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is
+// skipped and the process is terminated immediately (allows force quit of stuck daemon)
+// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit.
//
func Trap(cleanup func()) {
c := make(chan os.Signal, 1)
- signals := []os.Signal{os.Interrupt, syscall.SIGTERM}
- if os.Getenv("DEBUG") == "" {
- signals = append(signals, syscall.SIGQUIT)
- }
+ // we will handle INT, TERM, QUIT here
+ signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}
gosignal.Notify(c, signals...)
go func() {
interruptCount := uint32(0)
for sig := range c {
go func(sig os.Signal) {
- logrus.Infof("Received signal '%v', starting shutdown of docker...", sig)
+ logrus.Infof("Processing signal '%v'", sig)
switch sig {
case os.Interrupt, syscall.SIGTERM:
- // If the user really wants to interrupt, let him do so.
if atomic.LoadUint32(&interruptCount) < 3 {
// Initiate the cleanup only once
if atomic.AddUint32(&interruptCount, 1) == 1 {
- // Call cleanup handler
+ // Call the provided cleanup handler
cleanup()
os.Exit(0)
} else {
return
}
} else {
- logrus.Infof("Force shutdown of docker, interrupting cleanup")
+ // 3 SIGTERM/INT signals received; force exit without cleanup
+ logrus.Infof("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
}
case syscall.SIGQUIT:
+ DumpStacks()
+ logrus.Infof("Forcing docker daemon shutdown without cleanup on SIGQUIT")
}
+ // for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal #
os.Exit(128 + int(sig.(syscall.Signal)))
}(sig)
}
}()
}
+
+func DumpStacks() {
+ buf := make([]byte, 16384)
+ buf = buf[:runtime.Stack(buf, true)]
+ // Note that if the daemon is started with a less-verbose log-level than "info" (the default), the goroutine
+ // traces won't show up in the log.
+ logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf)
+}
diff --git a/pkg/sockets/README.md b/pkg/sockets/README.md
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/server/tcp_socket.go b/pkg/sockets/tcp_socket.go
similarity index 77%
rename from api/server/tcp_socket.go
rename to pkg/sockets/tcp_socket.go
index a1f57231a5..ac9edaebd1 100644
--- a/api/server/tcp_socket.go
+++ b/pkg/sockets/tcp_socket.go
@@ -1,4 +1,4 @@
-package server
+package sockets
import (
"crypto/tls"
@@ -11,27 +11,23 @@ import (
"github.com/docker/docker/pkg/listenbuffer"
)
-type tlsConfig struct {
+type TlsConfig struct {
CA string
Certificate string
Key string
Verify bool
}
-func tlsConfigFromServerConfig(conf *ServerConfig) *tlsConfig {
- verify := conf.TlsVerify
- if !conf.Tls && !conf.TlsVerify {
- return nil
- }
- return &tlsConfig{
+func NewTlsConfig(tlsCert, tlsKey, tlsCA string, verify bool) *TlsConfig {
+ return &TlsConfig{
Verify: verify,
- Certificate: conf.TlsCert,
- Key: conf.TlsKey,
- CA: conf.TlsCa,
+ Certificate: tlsCert,
+ Key: tlsKey,
+ CA: tlsCA,
}
}
-func NewTcpSocket(addr string, config *tlsConfig, activate <-chan struct{}) (net.Listener, error) {
+func NewTcpSocket(addr string, config *TlsConfig, activate <-chan struct{}) (net.Listener, error) {
l, err := listenbuffer.NewListenBuffer("tcp", addr, activate)
if err != nil {
return nil, err
@@ -44,7 +40,7 @@ func NewTcpSocket(addr string, config *tlsConfig, activate <-chan struct{}) (net
return l, nil
}
-func setupTls(l net.Listener, config *tlsConfig) (net.Listener, error) {
+func setupTls(l net.Listener, config *TlsConfig) (net.Listener, error) {
tlsCert, err := tls.LoadX509KeyPair(config.Certificate, config.Key)
if err != nil {
if os.IsNotExist(err) {
diff --git a/api/server/unix_socket.go b/pkg/sockets/unix_socket.go
similarity index 98%
rename from api/server/unix_socket.go
rename to pkg/sockets/unix_socket.go
index 157005da6f..0536382c8f 100644
--- a/api/server/unix_socket.go
+++ b/pkg/sockets/unix_socket.go
@@ -1,4 +1,6 @@
-package server
+// +build linux
+
+package sockets
import (
"fmt"
diff --git a/pkg/streamformatter/streamformatter.go b/pkg/streamformatter/streamformatter.go
index 90f2b695d3..792ce00f94 100644
--- a/pkg/streamformatter/streamformatter.go
+++ b/pkg/streamformatter/streamformatter.go
@@ -12,8 +12,14 @@ type StreamFormatter struct {
json bool
}
-func NewStreamFormatter(json bool) *StreamFormatter {
- return &StreamFormatter{json}
+// NewStreamFormatter returns a simple StreamFormatter
+func NewStreamFormatter() *StreamFormatter {
+ return &StreamFormatter{}
+}
+
+// NewJSONStreamFormatter returns a StreamFormatter configured to stream json
+func NewJSONStreamFormatter() *StreamFormatter {
+ return &StreamFormatter{true}
}
const streamNewline = "\r\n"
@@ -62,7 +68,6 @@ func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessa
progress = &jsonmessage.JSONProgress{}
}
if sf.json {
-
b, err := json.Marshal(&jsonmessage.JSONMessage{
Status: action,
ProgressMessage: progress.String(),
@@ -81,10 +86,6 @@ func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessa
return []byte(action + " " + progress.String() + endl)
}
-func (sf *StreamFormatter) Json() bool {
- return sf.json
-}
-
type StdoutFormater struct {
io.Writer
*StreamFormatter
diff --git a/pkg/streamformatter/streamformatter_test.go b/pkg/streamformatter/streamformatter_test.go
index 1dee05aa6c..acf81bef68 100644
--- a/pkg/streamformatter/streamformatter_test.go
+++ b/pkg/streamformatter/streamformatter_test.go
@@ -10,31 +10,55 @@ import (
)
func TestFormatStream(t *testing.T) {
- sf := NewStreamFormatter(true)
+ sf := NewStreamFormatter()
+ res := sf.FormatStream("stream")
+ if string(res) != "stream"+"\r" {
+ t.Fatalf("%q", res)
+ }
+}
+
+func TestFormatJSONStatus(t *testing.T) {
+ sf := NewStreamFormatter()
+ res := sf.FormatStatus("ID", "%s%d", "a", 1)
+ if string(res) != "a1\r\n" {
+ t.Fatalf("%q", res)
+ }
+}
+
+func TestFormatSimpleError(t *testing.T) {
+ sf := NewStreamFormatter()
+ res := sf.FormatError(errors.New("Error for formatter"))
+ if string(res) != "Error: Error for formatter\r\n" {
+ t.Fatalf("%q", res)
+ }
+}
+
+func TestJSONFormatStream(t *testing.T) {
+ sf := NewJSONStreamFormatter()
res := sf.FormatStream("stream")
if string(res) != `{"stream":"stream"}`+"\r\n" {
t.Fatalf("%q", res)
}
}
-func TestFormatStatus(t *testing.T) {
- sf := NewStreamFormatter(true)
+func TestJSONFormatStatus(t *testing.T) {
+ sf := NewJSONStreamFormatter()
res := sf.FormatStatus("ID", "%s%d", "a", 1)
if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" {
t.Fatalf("%q", res)
}
}
-func TestFormatSimpleError(t *testing.T) {
- sf := NewStreamFormatter(true)
+func TestJSONFormatSimpleError(t *testing.T) {
+ sf := NewJSONStreamFormatter()
res := sf.FormatError(errors.New("Error for formatter"))
if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" {
t.Fatalf("%q", res)
}
}
-func TestFormatJSONError(t *testing.T) {
- sf := NewStreamFormatter(true)
+func TestJSONFormatJSONError(t *testing.T) {
+ sf := NewJSONStreamFormatter()
err := &jsonmessage.JSONError{Code: 50, Message: "Json error"}
res := sf.FormatError(err)
if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" {
@@ -42,8 +66,8 @@ func TestFormatJSONError(t *testing.T) {
}
}
-func TestFormatProgress(t *testing.T) {
- sf := NewStreamFormatter(true)
+func TestJSONFormatProgress(t *testing.T) {
+ sf := NewJSONStreamFormatter()
progress := &jsonmessage.JSONProgress{
Current: 15,
Total: 30,
diff --git a/pkg/stringid/stringid.go b/pkg/stringid/stringid.go
index bf39df9b73..6a683b686a 100644
--- a/pkg/stringid/stringid.go
+++ b/pkg/stringid/stringid.go
@@ -4,19 +4,29 @@ import (
"crypto/rand"
"encoding/hex"
"io"
+ "regexp"
"strconv"
)
+const shortLen = 12
+
+var validShortID = regexp.MustCompile("^[a-z0-9]{12}$")
+
+// Determine if an arbitrary string *looks like* a short ID.
+func IsShortID(id string) bool {
+ return validShortID.MatchString(id)
+}
+
// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a langer prefix, or the full-length Id.
func TruncateID(id string) string {
- shortLen := 12
+ trimTo := shortLen
if len(id) < shortLen {
- shortLen = len(id)
+ trimTo = len(id)
}
- return id[:shortLen]
+ return id[:trimTo]
}
// GenerateRandomID returns an unique id
diff --git a/pkg/stringid/stringid_test.go b/pkg/stringid/stringid_test.go
index 21f8f8a2fb..bcb1365495 100644
--- a/pkg/stringid/stringid_test.go
+++ b/pkg/stringid/stringid_test.go
@@ -1,6 +1,9 @@
package stringid
-import "testing"
+import (
+ "strings"
+ "testing"
+)
func TestGenerateRandomID(t *testing.T) {
id := GenerateRandomID()
@@ -33,3 +36,21 @@ func TestShortenIdInvalid(t *testing.T) {
t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID)
}
}
+
+func TestIsShortIDNonHex(t *testing.T) {
+ id := "some non-hex value"
+ if IsShortID(id) {
+ t.Fatalf("%s is not a short ID", id)
+ }
+}
+
+func TestIsShortIDNotCorrectSize(t *testing.T) {
+ id := strings.Repeat("a", shortLen+1)
+ if IsShortID(id) {
+ t.Fatalf("%s is not a short ID", id)
+ }
+ id = strings.Repeat("a", shortLen-1)
+ if IsShortID(id) {
+ t.Fatalf("%s is not a short ID", id)
+ }
+}
diff --git a/pkg/stringutils/stringutils.go b/pkg/stringutils/stringutils.go
index e3ebf5d1ed..aee2648b2d 100644
--- a/pkg/stringutils/stringutils.go
+++ b/pkg/stringutils/stringutils.go
@@ -2,9 +2,10 @@ package stringutils
import (
"bytes"
- mathrand "math/rand"
+ "math/rand"
"strings"
- "time"
+
+ "github.com/docker/docker/pkg/random"
)
// Generate alpha only random stirng with length n
@@ -12,7 +13,7 @@ func GenerateRandomAlphaOnlyString(n int) string {
// make a really long string
letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
b := make([]byte, n)
- r := mathrand.New(mathrand.NewSource(time.Now().UTC().UnixNano()))
+ r := rand.New(random.NewSource())
for i := range b {
b[i] = letters[r.Intn(len(letters))]
}
@@ -26,7 +27,7 @@ func GenerateRandomAsciiString(n int) string {
"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
res := make([]byte, n)
for i := 0; i < n; i++ {
- res[i] = chars[mathrand.Intn(len(chars))]
+ res[i] = chars[rand.Intn(len(chars))]
}
return string(res)
}
diff --git a/pkg/sysinfo/sysinfo.go b/pkg/sysinfo/sysinfo.go
index 195a03e9a8..5b7eca2458 100644
--- a/pkg/sysinfo/sysinfo.go
+++ b/pkg/sysinfo/sysinfo.go
@@ -1,64 +1,13 @@
package sysinfo
-import (
- "io/ioutil"
- "os"
- "path"
-
- "github.com/Sirupsen/logrus"
- "github.com/docker/libcontainer/cgroups"
-)
-
// SysInfo stores information about which features a kernel supports.
+// TODO Windows: Factor out platform specific capabilities.
type SysInfo struct {
MemoryLimit bool
SwapLimit bool
+ CpuCfsPeriod bool
CpuCfsQuota bool
IPv4ForwardingDisabled bool
AppArmor bool
-}
-
-// New returns a new SysInfo, using the filesystem to detect which features the kernel supports.
-func New(quiet bool) *SysInfo {
- sysInfo := &SysInfo{}
- if cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory"); err != nil {
- if !quiet {
- logrus.Warnf("Your kernel does not support cgroup memory limit: %v", err)
- }
- } else {
- // If memory cgroup is mounted, MemoryLimit is always enabled.
- sysInfo.MemoryLimit = true
-
- _, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes"))
- sysInfo.SwapLimit = err1 == nil
- if !sysInfo.SwapLimit && !quiet {
- logrus.Warn("Your kernel does not support swap memory limit.")
- }
- }
-
- if cgroupCpuMountpoint, err := cgroups.FindCgroupMountpoint("cpu"); err != nil {
- if !quiet {
- logrus.Warnf("%v", err)
- }
- } else {
- _, err1 := ioutil.ReadFile(path.Join(cgroupCpuMountpoint, "cpu.cfs_quota_us"))
- sysInfo.CpuCfsQuota = err1 == nil
- if !sysInfo.CpuCfsQuota && !quiet {
- logrus.Warn("Your kernel does not support cgroup cfs quotas")
- }
- }
-
- // Check if AppArmor is supported.
- if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) {
- sysInfo.AppArmor = false
- } else {
- sysInfo.AppArmor = true
- }
-
- // Check if Devices cgroup is mounted, it is hard requirement for container security.
- if _, err := cgroups.FindCgroupMountpoint("devices"); err != nil {
- logrus.Fatalf("Error mounting devices cgroup: %v", err)
- }
-
- return sysInfo
+ OomKillDisable bool
}
diff --git a/pkg/sysinfo/sysinfo_linux.go b/pkg/sysinfo/sysinfo_linux.go
new file mode 100644
index 0000000000..396ea3b271
--- /dev/null
+++ b/pkg/sysinfo/sysinfo_linux.go
@@ -0,0 +1,79 @@
+package sysinfo
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/libcontainer/cgroups"
+)
+
+// New returns a new SysInfo, using the filesystem to detect which features the kernel supports.
+func New(quiet bool) *SysInfo {
+ sysInfo := &SysInfo{}
+ if cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory"); err != nil {
+ if !quiet {
+ logrus.Warnf("Your kernel does not support cgroup memory limit: %v", err)
+ }
+ } else {
+ // If memory cgroup is mounted, MemoryLimit is always enabled.
+ sysInfo.MemoryLimit = true
+
+ _, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes"))
+ sysInfo.SwapLimit = err1 == nil
+ if !sysInfo.SwapLimit && !quiet {
+ logrus.Warn("Your kernel does not support swap memory limit.")
+ }
+
+ _, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.oom_control"))
+ sysInfo.OomKillDisable = err == nil
+ if !sysInfo.OomKillDisable && !quiet {
+ logrus.Warnf("Your kernel does not support oom control.")
+ }
+ }
+
+ if cgroupCpuMountpoint, err := cgroups.FindCgroupMountpoint("cpu"); err != nil {
+ if !quiet {
+ logrus.Warnf("%v", err)
+ }
+ } else {
+ _, err := ioutil.ReadFile(path.Join(cgroupCpuMountpoint, "cpu.cfs_period_us"))
+ sysInfo.CpuCfsPeriod = err == nil
+ if !sysInfo.CpuCfsPeriod && !quiet {
+ logrus.Warn("Your kernel does not support cgroup cfs period")
+ }
+ _, err = ioutil.ReadFile(path.Join(cgroupCpuMountpoint, "cpu.cfs_quota_us"))
+ sysInfo.CpuCfsQuota = err == nil
+ if !sysInfo.CpuCfsQuota && !quiet {
+ logrus.Warn("Your kernel does not support cgroup cfs quotas")
+ }
+ }
+
+ // Check if ipv4_forward is disabled.
+ if data, err := ioutil.ReadFile("/proc/sys/net/ipv4/ip_forward"); os.IsNotExist(err) {
+ sysInfo.IPv4ForwardingDisabled = true
+ } else {
+ if enabled, _ := strconv.Atoi(strings.TrimSpace(string(data))); enabled == 0 {
+ sysInfo.IPv4ForwardingDisabled = true
+ } else {
+ sysInfo.IPv4ForwardingDisabled = false
+ }
+ }
+
+ // Check if AppArmor is supported.
+ if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) {
+ sysInfo.AppArmor = false
+ } else {
+ sysInfo.AppArmor = true
+ }
+
+ // Check if Devices cgroup is mounted, it is hard requirement for container security.
+ if _, err := cgroups.FindCgroupMountpoint("devices"); err != nil {
+ logrus.Fatalf("Error mounting devices cgroup: %v", err)
+ }
+
+ return sysInfo
+}
diff --git a/pkg/sysinfo/sysinfo_windows.go b/pkg/sysinfo/sysinfo_windows.go
new file mode 100644
index 0000000000..b4d31519f8
--- /dev/null
+++ b/pkg/sysinfo/sysinfo_windows.go
@@ -0,0 +1,7 @@
+package sysinfo
+
+// TODO Windows
+func New(quiet bool) *SysInfo {
+ sysInfo := &SysInfo{}
+ return sysInfo
+}
diff --git a/pkg/system/filesys.go b/pkg/system/filesys.go
new file mode 100644
index 0000000000..e1f70e8dac
--- /dev/null
+++ b/pkg/system/filesys.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package system
+
+import (
+ "os"
+)
+
+func MkdirAll(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+}
diff --git a/pkg/system/filesys_windows.go b/pkg/system/filesys_windows.go
new file mode 100644
index 0000000000..90b500608e
--- /dev/null
+++ b/pkg/system/filesys_windows.go
@@ -0,0 +1,64 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "regexp"
+ "syscall"
+)
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, perm os.FileMode) error {
+ if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+ return nil
+ }
+
+ // The rest of this method is copied from os.MkdirAll and should be kept
+ // as-is to ensure compatibility.
+
+ // Fast path: if we can tell whether path is a directory or file, stop with success or error.
+ dir, err := os.Stat(path)
+ if err == nil {
+ if dir.IsDir() {
+ return nil
+ }
+ return &os.PathError{
+ Op: "mkdir",
+ Path: path,
+ Err: syscall.ENOTDIR,
+ }
+ }
+
+ // Slow path: make sure parent exists and then call Mkdir for path.
+ i := len(path)
+ for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+ i--
+ }
+
+ j := i
+ for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+ j--
+ }
+
+ if j > 1 {
+ // Create parent
+ err = MkdirAll(path[0:j-1], perm)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parent now exists; invoke Mkdir and use its result.
+ err = os.Mkdir(path, perm)
+ if err != nil {
+ // Handle arguments like "foo/." by
+ // double-checking that directory doesn't exist.
+ dir, err1 := os.Lstat(path)
+ if err1 == nil && dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
diff --git a/pkg/system/lstat_windows.go b/pkg/system/lstat_windows.go
index 801e756d8b..eee1be26eb 100644
--- a/pkg/system/lstat_windows.go
+++ b/pkg/system/lstat_windows.go
@@ -2,7 +2,28 @@
package system
+import (
+ "os"
+)
+
+// Some explanation for my own sanity, and hopefully maintainers in the
+// future.
+//
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+// Note the Linux version uses fromStatT to do the copy back,
+// but that is not strictly necessary when already in an OS-specific module.
+
func Lstat(path string) (*Stat_t, error) {
- // should not be called on cli code path
- return nil, ErrNotSupportedPlatform
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Stat_t{
+ name: fi.Name(),
+ size: fi.Size(),
+ mode: fi.Mode(),
+ modTime: fi.ModTime(),
+ isDir: fi.IsDir()}, nil
}
diff --git a/pkg/system/mknod_windows.go b/pkg/system/mknod_windows.go
index b4020c11b6..1811542ab3 100644
--- a/pkg/system/mknod_windows.go
+++ b/pkg/system/mknod_windows.go
@@ -3,10 +3,9 @@
package system
func Mknod(path string, mode uint32, dev int) error {
- // should not be called on cli code path
return ErrNotSupportedPlatform
}
func Mkdev(major int64, minor int64) uint32 {
- panic("Mkdev not implemented on windows, should not be called on cli code")
+ panic("Mkdev not implemented on Windows.")
}
diff --git a/pkg/system/stat.go b/pkg/system/stat.go
index ba22b4dd9d..e2ecfe52fe 100644
--- a/pkg/system/stat.go
+++ b/pkg/system/stat.go
@@ -1,3 +1,5 @@
+// +build !windows
+
package system
import (
diff --git a/pkg/system/stat_windows.go b/pkg/system/stat_windows.go
index 42d29d6cca..b1fd39e83f 100644
--- a/pkg/system/stat_windows.go
+++ b/pkg/system/stat_windows.go
@@ -3,15 +3,34 @@
package system
import (
- "errors"
- "syscall"
+ "os"
+ "time"
)
-func fromStatT(s *syscall.Win32FileAttributeData) (*Stat_t, error) {
- return nil, errors.New("fromStatT should not be called on windows path")
+type Stat_t struct {
+ name string
+ size int64
+ mode os.FileMode
+ modTime time.Time
+ isDir bool
}
-func Stat(path string) (*Stat_t, error) {
- // should not be called on cli code path
- return nil, ErrNotSupportedPlatform
+func (s Stat_t) Name() string {
+ return s.name
+}
+
+func (s Stat_t) Size() int64 {
+ return s.size
+}
+
+func (s Stat_t) Mode() os.FileMode {
+ return s.mode
+}
+
+func (s Stat_t) ModTime() time.Time {
+ return s.modTime
+}
+
+func (s Stat_t) IsDir() bool {
+ return s.isDir
}
diff --git a/pkg/tarsum/tarsum.go b/pkg/tarsum/tarsum.go
index 88fcbe4a94..a778bb0b9d 100644
--- a/pkg/tarsum/tarsum.go
+++ b/pkg/tarsum/tarsum.go
@@ -1,6 +1,7 @@
package tarsum
import (
+ "archive/tar"
"bytes"
"compress/gzip"
"crypto"
@@ -11,8 +12,6 @@ import (
"hash"
"io"
"strings"
-
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
const (
diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go
index 26f12cc847..968d7c7cf1 100644
--- a/pkg/tarsum/tarsum_test.go
+++ b/pkg/tarsum/tarsum_test.go
@@ -1,6 +1,7 @@
package tarsum
import (
+ "archive/tar"
"bytes"
"compress/gzip"
"crypto/md5"
@@ -14,8 +15,6 @@ import (
"io/ioutil"
"os"
"testing"
-
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
type testLayer struct {
diff --git a/pkg/tarsum/versioning.go b/pkg/tarsum/versioning.go
index 0ceb5298a3..3cdc6ddaa4 100644
--- a/pkg/tarsum/versioning.go
+++ b/pkg/tarsum/versioning.go
@@ -1,12 +1,11 @@
package tarsum
import (
+ "archive/tar"
"errors"
"sort"
"strconv"
"strings"
-
- "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
// versioning of the TarSum algorithm
diff --git a/pkg/timeutils/utils.go b/pkg/timeutils/utils.go
new file mode 100644
index 0000000000..6af16a1d7f
--- /dev/null
+++ b/pkg/timeutils/utils.go
@@ -0,0 +1,29 @@
+package timeutils
+
+import (
+ "strconv"
+ "strings"
+ "time"
+)
+
+// GetTimestamp tries to parse the given string as an RFC3339 time
+// or a Unix timestamp (with seconds precision); if successful it
+// returns a Unix timestamp as a string, otherwise it returns the value back.
+func GetTimestamp(value string) string {
+ var format string
+ if strings.Contains(value, ".") {
+ format = time.RFC3339Nano
+ } else {
+ format = time.RFC3339
+ }
+
+ loc := time.FixedZone(time.Now().Zone())
+ if len(value) < len(format) {
+ format = format[:len(value)]
+ }
+ t, err := time.ParseInLocation(format, value, loc)
+ if err != nil {
+ return value
+ }
+ return strconv.FormatInt(t.Unix(), 10)
+}
diff --git a/pkg/timeutils/utils_test.go b/pkg/timeutils/utils_test.go
new file mode 100644
index 0000000000..1d724fb2ac
--- /dev/null
+++ b/pkg/timeutils/utils_test.go
@@ -0,0 +1,36 @@
+package timeutils
+
+import (
+ "testing"
+)
+
+func TestGetTimestamp(t *testing.T) {
+ cases := []struct{ in, expected string }{
+ {"0", "-62167305600"}, // 0 gets parsed year 0
+
+ // Partial RFC3339 strings get parsed with second precision
+ {"2006-01-02T15:04:05.999999999+07:00", "1136189045"},
+ {"2006-01-02T15:04:05.999999999Z", "1136214245"},
+ {"2006-01-02T15:04:05.999999999", "1136214245"},
+ {"2006-01-02T15:04:05", "1136214245"},
+ {"2006-01-02T15:04", "1136214240"},
+ {"2006-01-02T15", "1136214000"},
+ {"2006-01-02T", "1136160000"},
+ {"2006-01-02", "1136160000"},
+ {"2006", "1136073600"},
+ {"2015-05-13T20:39:09Z", "1431549549"},
+
+ // unix timestamps returned as is
+ {"1136073600", "1136073600"},
+
+ // String fallback
+ {"invalid", "invalid"},
+ }
+
+ for _, c := range cases {
+ o := GetTimestamp(c.in)
+ if o != c.expected {
+ t.Fatalf("wrong value for '%s'. expected:'%s' got:'%s'", c.in, c.expected, o)
+ }
+ }
+}
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/LICENSE b/pkg/transport/LICENSE
similarity index 92%
rename from vendor/src/github.com/go-fsnotify/fsnotify/LICENSE
rename to pkg/transport/LICENSE
index f21e540800..d02f24fd52 100644
--- a/vendor/src/github.com/go-fsnotify/fsnotify/LICENSE
+++ b/pkg/transport/LICENSE
@@ -1,5 +1,4 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-Copyright (c) 2012 fsnotify Authors. All rights reserved.
+Copyright (c) 2009 The oauth2 Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/pkg/transport/transport.go b/pkg/transport/transport.go
new file mode 100644
index 0000000000..510d8b4bc2
--- /dev/null
+++ b/pkg/transport/transport.go
@@ -0,0 +1,148 @@
+package transport
+
+import (
+ "io"
+ "net/http"
+ "sync"
+)
+
+type RequestModifier interface {
+ ModifyRequest(*http.Request) error
+}
+
+type headerModifier http.Header
+
+// NewHeaderRequestModifier returns a RequestModifier that merges the HTTP headers
+// passed as an argument, with the HTTP headers of a request.
+//
+// If the same key is present in both, the modifying header values for that key
+// are appended to the values for that same key in the request header.
+func NewHeaderRequestModifier(header http.Header) RequestModifier {
+ return headerModifier(header)
+}
+
+func (h headerModifier) ModifyRequest(req *http.Request) error {
+ for k, s := range http.Header(h) {
+ req.Header[k] = append(req.Header[k], s...)
+ }
+
+ return nil
+}
+
+// NewTransport returns an http.RoundTripper that modifies requests according to
+// the RequestModifiers passed in the arguments, before sending the requests to
+// the base http.RoundTripper (which, if nil, defaults to http.DefaultTransport).
+func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
+ return &transport{
+ Modifiers: modifiers,
+ Base: base,
+ }
+}
+
+// transport is an http.RoundTripper that makes HTTP requests after
+// copying and modifying the request
+type transport struct {
+ Modifiers []RequestModifier
+ Base http.RoundTripper
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
+
+func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ req2 := CloneRequest(req)
+ for _, modifier := range t.Modifiers {
+ if err := modifier.ModifyRequest(req2); err != nil {
+ return nil, err
+ }
+ }
+
+ t.setModReq(req, req2)
+ res, err := t.base().RoundTrip(req2)
+ if err != nil {
+ t.setModReq(req, nil)
+ return nil, err
+ }
+ res.Body = &OnEOFReader{
+ Rc: res.Body,
+ Fn: func() { t.setModReq(req, nil) },
+ }
+ return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *transport) CancelRequest(req *http.Request) {
+ type canceler interface {
+ CancelRequest(*http.Request)
+ }
+ if cr, ok := t.base().(canceler); ok {
+ t.mu.Lock()
+ modReq := t.modReq[req]
+ delete(t.modReq, req)
+ t.mu.Unlock()
+ cr.CancelRequest(modReq)
+ }
+}
+
+func (t *transport) base() http.RoundTripper {
+ if t.Base != nil {
+ return t.Base
+ }
+ return http.DefaultTransport
+}
+
+func (t *transport) setModReq(orig, mod *http.Request) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.modReq == nil {
+ t.modReq = make(map[*http.Request]*http.Request)
+ }
+ if mod == nil {
+ delete(t.modReq, orig)
+ } else {
+ t.modReq[orig] = mod
+ }
+}
+
+// CloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func CloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ }
+
+ return r2
+}
+
+// OnEOFReader ensures a callback function is called
+// on Close() and when the underlying Reader returns an io.EOF error
+type OnEOFReader struct {
+ Rc io.ReadCloser
+ Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.Rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+func (r *OnEOFReader) Close() error {
+ err := r.Rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *OnEOFReader) runFunc() {
+ if fn := r.Fn; fn != nil {
+ fn()
+ r.Fn = nil
+ }
+}
diff --git a/pkg/truncindex/truncindex.go b/pkg/truncindex/truncindex.go
index 73c7e24fb4..9aae5c0d08 100644
--- a/pkg/truncindex/truncindex.go
+++ b/pkg/truncindex/truncindex.go
@@ -14,12 +14,6 @@ var (
ErrAmbiguousPrefix = errors.New("Multiple IDs found with provided prefix")
)
-func init() {
- // Change patricia max prefix per node length,
- // because our len(ID) always 64
- patricia.MaxPrefixPerNode = 64
-}
-
// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
type TruncIndex struct {
@@ -31,8 +25,11 @@ type TruncIndex struct {
// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs
func NewTruncIndex(ids []string) (idx *TruncIndex) {
idx = &TruncIndex{
- ids: make(map[string]struct{}),
- trie: patricia.NewTrie(),
+ ids: make(map[string]struct{}),
+
+ // Change patricia max prefix per node length,
+ // because our len(ID) always 64
+ trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
}
for _, id := range ids {
idx.addID(id)
diff --git a/pkg/units/duration.go b/pkg/units/duration.go
index cd33121496..44012aafb5 100644
--- a/pkg/units/duration.go
+++ b/pkg/units/duration.go
@@ -27,5 +27,5 @@ func HumanDuration(d time.Duration) string {
} else if hours < 24*365*2 {
return fmt.Sprintf("%d months", hours/24/30)
}
- return fmt.Sprintf("%f years", d.Hours()/24/365)
+ return fmt.Sprintf("%d years", int(d.Hours())/24/365)
}
diff --git a/pkg/units/duration_test.go b/pkg/units/duration_test.go
index a22947402b..fcfb6b7bbd 100644
--- a/pkg/units/duration_test.go
+++ b/pkg/units/duration_test.go
@@ -41,6 +41,6 @@ func TestHumanDuration(t *testing.T) {
assertEquals(t, "13 months", HumanDuration(13*month))
assertEquals(t, "23 months", HumanDuration(23*month))
assertEquals(t, "24 months", HumanDuration(24*month))
- assertEquals(t, "2.010959 years", HumanDuration(24*month+2*week))
- assertEquals(t, "3.164384 years", HumanDuration(3*year+2*month))
+ assertEquals(t, "2 years", HumanDuration(24*month+2*week))
+ assertEquals(t, "3 years", HumanDuration(3*year+2*month))
}
diff --git a/pkg/units/size.go b/pkg/units/size.go
index 7cfb57ba51..d7850ad0b0 100644
--- a/pkg/units/size.go
+++ b/pkg/units/size.go
@@ -37,23 +37,25 @@ var (
var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+// CustomSize returns a human-readable approximation of a size
+// using a custom format.
+func CustomSize(format string, size float64, base float64, _map []string) string {
+ i := 0
+ for size >= base {
+ size = size / base
+ i++
+ }
+ return fmt.Sprintf(format, size, _map[i])
+}
+
// HumanSize returns a human-readable approximation of a size
// using SI standard (eg. "44kB", "17MB")
func HumanSize(size float64) string {
- return intToString(float64(size), 1000.0, decimapAbbrs)
+ return CustomSize("%.4g %s", float64(size), 1000.0, decimapAbbrs)
}
func BytesSize(size float64) string {
- return intToString(size, 1024.0, binaryAbbrs)
-}
-
-func intToString(size, unit float64, _map []string) string {
- i := 0
- for size >= unit {
- size = size / unit
- i++
- }
- return fmt.Sprintf("%.4g %s", size, _map[i])
+ return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
}
// FromHumanSize returns an integer from a human-readable specification of a
diff --git a/pkg/urlutil/git.go b/pkg/urlutil/git.go
index ba88ddf6e6..dc4d6662e3 100644
--- a/pkg/urlutil/git.go
+++ b/pkg/urlutil/git.go
@@ -1,6 +1,9 @@
package urlutil
-import "strings"
+import (
+ "regexp"
+ "strings"
+)
var (
validPrefixes = []string{
@@ -8,11 +11,13 @@ var (
"github.com/",
"git@",
}
+
+ urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$")
)
// IsGitURL returns true if the provided str is a git repository URL.
func IsGitURL(str string) bool {
- if IsURL(str) && strings.HasSuffix(str, ".git") {
+ if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) {
return true
}
for _, prefix := range validPrefixes {
diff --git a/pkg/urlutil/git_test.go b/pkg/urlutil/git_test.go
index 01dcea7da3..bb89d8b5fd 100644
--- a/pkg/urlutil/git_test.go
+++ b/pkg/urlutil/git_test.go
@@ -9,10 +9,15 @@ var (
"git@bitbucket.org:atlassianlabs/atlassian-docker.git",
"https://github.com/docker/docker.git",
"http://github.com/docker/docker.git",
+ "http://github.com/docker/docker.git#branch",
+ "http://github.com/docker/docker.git#:dir",
}
incompleteGitUrls = []string{
"github.com/docker/docker",
}
+ invalidGitUrls = []string{
+ "http://github.com/docker/docker.git:#branch",
+ }
)
func TestValidGitTransport(t *testing.T) {
@@ -35,9 +40,16 @@ func TestIsGIT(t *testing.T) {
t.Fatalf("%q should be detected as valid Git url", url)
}
}
+
for _, url := range incompleteGitUrls {
if IsGitURL(url) == false {
t.Fatalf("%q should be detected as valid Git url", url)
}
}
+
+ for _, url := range invalidGitUrls {
+ if IsGitURL(url) == true {
+ t.Fatalf("%q should not be detected as valid Git prefix", url)
+ }
+ }
}
diff --git a/pkg/useragent/README.md b/pkg/useragent/README.md
new file mode 100644
index 0000000000..d9cb367d10
--- /dev/null
+++ b/pkg/useragent/README.md
@@ -0,0 +1 @@
+This package provides helper functions to pack version information into a single User-Agent header.
diff --git a/pkg/useragent/useragent.go b/pkg/useragent/useragent.go
new file mode 100644
index 0000000000..9e35d1c70d
--- /dev/null
+++ b/pkg/useragent/useragent.go
@@ -0,0 +1,60 @@
+// Package useragent provides helper functions to pack
+// version information into a single User-Agent header.
+package useragent
+
+import (
+ "errors"
+ "strings"
+)
+
+var (
+ ErrNilRequest = errors.New("request cannot be nil")
+)
+
+// VersionInfo is used to model UserAgent versions.
+type VersionInfo struct {
+ Name string
+ Version string
+}
+
+func (vi *VersionInfo) isValid() bool {
+ const stopChars = " \t\r\n/"
+ name := vi.Name
+ vers := vi.Version
+ if len(name) == 0 || strings.ContainsAny(name, stopChars) {
+ return false
+ }
+ if len(vers) == 0 || strings.ContainsAny(vers, stopChars) {
+ return false
+ }
+ return true
+}
+
+// Convert versions to a string and append the string to the string base.
+//
+// Each VersionInfo will be converted to a string in the format of
+// "product/version", where the "product" is taken from the name field, while
+// the version is taken from the version field. Several pieces of version
+// information will be concatenated and separated by spaces.
+//
+// Example:
+// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"})
+// results in "base foo/1.0 bar/2.0".
+func AppendVersions(base string, versions ...VersionInfo) string {
+ if len(versions) == 0 {
+ return base
+ }
+
+ verstrs := make([]string, 0, 1+len(versions))
+ if len(base) > 0 {
+ verstrs = append(verstrs, base)
+ }
+
+ for _, v := range versions {
+ if !v.isValid() {
+ continue
+ }
+ verstrs = append(verstrs, v.Name+"/"+v.Version)
+ }
+ return strings.Join(verstrs, " ")
+}
diff --git a/pkg/useragent/useragent_test.go b/pkg/useragent/useragent_test.go
new file mode 100644
index 0000000000..0ad7243a6d
--- /dev/null
+++ b/pkg/useragent/useragent_test.go
@@ -0,0 +1,31 @@
+package useragent
+
+import "testing"
+
+func TestVersionInfo(t *testing.T) {
+ vi := VersionInfo{"foo", "bar"}
+ if !vi.isValid() {
+ t.Fatalf("VersionInfo should be valid")
+ }
+ vi = VersionInfo{"", "bar"}
+ if vi.isValid() {
+ t.Fatalf("Expected VersionInfo to be invalid")
+ }
+ vi = VersionInfo{"foo", ""}
+ if vi.isValid() {
+ t.Fatalf("Expected VersionInfo to be invalid")
+ }
+}
+
+func TestAppendVersions(t *testing.T) {
+ vis := []VersionInfo{
+ {"foo", "1.0"},
+ {"bar", "0.1"},
+ {"pi", "3.1.4"},
+ }
+ v := AppendVersions("base", vis...)
+ expect := "base foo/1.0 bar/0.1 pi/3.1.4"
+ if v != expect {
+ t.Fatalf("expected %q, got %q", expect, v)
+ }
+}
diff --git a/project/PACKAGERS.md b/project/PACKAGERS.md
index d321a900d6..fd2156c585 100644
--- a/project/PACKAGERS.md
+++ b/project/PACKAGERS.md
@@ -303,6 +303,7 @@ by having support for them in the kernel or userspace. A few examples include:
* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
least the "auplink" utility from aufs-tools)
* BTRFS graph driver (requires BTRFS support enabled in the kernel)
+* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
## Daemon Init Script
diff --git a/registry/auth.go b/registry/auth.go
index 1ac1ca984e..33f8fa0689 100644
--- a/registry/auth.go
+++ b/registry/auth.go
@@ -11,7 +11,6 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/cliconfig"
- "github.com/docker/docker/pkg/requestdecorator"
)
type RequestAuthorization struct {
@@ -45,9 +44,6 @@ func (auth *RequestAuthorization) getToken() (string, error) {
return auth.tokenCache, nil
}
- client := auth.registryEndpoint.HTTPClient()
- factory := HTTPRequestFactory(nil)
-
for _, challenge := range auth.registryEndpoint.AuthChallenges {
switch strings.ToLower(challenge.Scheme) {
case "basic":
@@ -59,7 +55,7 @@ func (auth *RequestAuthorization) getToken() (string, error) {
params[k] = v
}
params["scope"] = fmt.Sprintf("%s:%s:%s", auth.resource, auth.scope, strings.Join(auth.actions, ","))
- token, err := getToken(auth.authConfig.Username, auth.authConfig.Password, params, auth.registryEndpoint, client, factory)
+ token, err := getToken(auth.authConfig.Username, auth.authConfig.Password, params, auth.registryEndpoint)
if err != nil {
return "", err
}
@@ -92,21 +88,20 @@ func (auth *RequestAuthorization) Authorize(req *http.Request) error {
}
// Login tries to register/login to the registry server.
-func Login(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, factory *requestdecorator.RequestFactory) (string, error) {
+func Login(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) {
// Separates the v2 registry login logic from the v1 logic.
if registryEndpoint.Version == APIVersion2 {
- return loginV2(authConfig, registryEndpoint, factory)
+ return loginV2(authConfig, registryEndpoint)
}
- return loginV1(authConfig, registryEndpoint, factory)
+ return loginV1(authConfig, registryEndpoint)
}
// loginV1 tries to register/login to the v1 registry server.
-func loginV1(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, factory *requestdecorator.RequestFactory) (string, error) {
+func loginV1(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) {
var (
status string
reqBody []byte
err error
- client = registryEndpoint.HTTPClient()
reqStatusCode = 0
serverAddress = authConfig.ServerAddress
)
@@ -130,7 +125,7 @@ func loginV1(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, facto
// using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status.
b := strings.NewReader(string(jsonBody))
- req1, err := client.Post(serverAddress+"users/", "application/json; charset=utf-8", b)
+ req1, err := registryEndpoint.client.Post(serverAddress+"users/", "application/json; charset=utf-8", b)
if err != nil {
return "", fmt.Errorf("Server Error: %s", err)
}
@@ -151,9 +146,9 @@ func loginV1(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, facto
}
} else if reqStatusCode == 400 {
if string(reqBody) == "\"Username or email already exists\"" {
- req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
+ req, err := http.NewRequest("GET", serverAddress+"users/", nil)
req.SetBasicAuth(authConfig.Username, authConfig.Password)
- resp, err := client.Do(req)
+ resp, err := registryEndpoint.client.Do(req)
if err != nil {
return "", err
}
@@ -180,9 +175,9 @@ func loginV1(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, facto
} else if reqStatusCode == 401 {
// This case would happen with private registries where /v1/users is
// protected, so people can use `docker login` as an auth check.
- req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
+ req, err := http.NewRequest("GET", serverAddress+"users/", nil)
req.SetBasicAuth(authConfig.Username, authConfig.Password)
- resp, err := client.Do(req)
+ resp, err := registryEndpoint.client.Do(req)
if err != nil {
return "", err
}
@@ -214,12 +209,11 @@ func loginV1(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, facto
// now, users should create their account through other means like directly from a web page
// served by the v2 registry service provider. Whether this will be supported in the future
// is to be determined.
-func loginV2(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, factory *requestdecorator.RequestFactory) (string, error) {
+func loginV2(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) {
logrus.Debugf("attempting v2 login to registry endpoint %s", registryEndpoint)
var (
err error
allErrors []error
- client = registryEndpoint.HTTPClient()
)
for _, challenge := range registryEndpoint.AuthChallenges {
@@ -227,9 +221,9 @@ func loginV2(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, facto
switch strings.ToLower(challenge.Scheme) {
case "basic":
- err = tryV2BasicAuthLogin(authConfig, challenge.Parameters, registryEndpoint, client, factory)
+ err = tryV2BasicAuthLogin(authConfig, challenge.Parameters, registryEndpoint)
case "bearer":
- err = tryV2TokenAuthLogin(authConfig, challenge.Parameters, registryEndpoint, client, factory)
+ err = tryV2TokenAuthLogin(authConfig, challenge.Parameters, registryEndpoint)
default:
// Unsupported challenge types are explicitly skipped.
err = fmt.Errorf("unsupported auth scheme: %q", challenge.Scheme)
@@ -247,15 +241,15 @@ func loginV2(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, facto
return "", fmt.Errorf("no successful auth challenge for %s - errors: %s", registryEndpoint, allErrors)
}
-func tryV2BasicAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]string, registryEndpoint *Endpoint, client *http.Client, factory *requestdecorator.RequestFactory) error {
- req, err := factory.NewRequest("GET", registryEndpoint.Path(""), nil)
+func tryV2BasicAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error {
+ req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil)
if err != nil {
return err
}
req.SetBasicAuth(authConfig.Username, authConfig.Password)
- resp, err := client.Do(req)
+ resp, err := registryEndpoint.client.Do(req)
if err != nil {
return err
}
@@ -268,20 +262,20 @@ func tryV2BasicAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]str
return nil
}
-func tryV2TokenAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]string, registryEndpoint *Endpoint, client *http.Client, factory *requestdecorator.RequestFactory) error {
- token, err := getToken(authConfig.Username, authConfig.Password, params, registryEndpoint, client, factory)
+func tryV2TokenAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error {
+ token, err := getToken(authConfig.Username, authConfig.Password, params, registryEndpoint)
if err != nil {
return err
}
- req, err := factory.NewRequest("GET", registryEndpoint.Path(""), nil)
+ req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil)
if err != nil {
return err
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
- resp, err := client.Do(req)
+ resp, err := registryEndpoint.client.Do(req)
if err != nil {
return err
}
diff --git a/registry/config.go b/registry/config.go
index a0a978cc72..568756f4e5 100644
--- a/registry/config.go
+++ b/registry/config.go
@@ -198,6 +198,9 @@ func ValidateIndexName(val string) (string, error) {
if val == "index."+IndexServerName() {
val = IndexServerName()
}
+ if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") {
+ return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val)
+ }
// *TODO: Check if valid hostname[:port]/ip[:port]?
return val, nil
}
@@ -235,6 +238,9 @@ func validateRemoteName(remoteName string) error {
if !validRepo.MatchString(name) {
return fmt.Errorf("Invalid repository name (%s), only [a-z0-9-_.] are allowed", name)
}
+ if strings.HasPrefix(name, "-") || strings.HasSuffix(name, "-") {
+ return fmt.Errorf("Invalid repository name (%s). Cannot begin or end with a hyphen.", name)
+ }
return nil
}
diff --git a/registry/endpoint.go b/registry/endpoint.go
index 84b11a987b..ce92668f41 100644
--- a/registry/endpoint.go
+++ b/registry/endpoint.go
@@ -1,7 +1,6 @@
package registry
import (
- "crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
@@ -12,7 +11,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/registry/api/v2"
- "github.com/docker/docker/pkg/requestdecorator"
+ "github.com/docker/docker/pkg/transport"
)
// for mocking in unit tests
@@ -43,9 +42,9 @@ func scanForAPIVersion(address string) (string, APIVersion) {
}
// NewEndpoint parses the given address to return a registry endpoint.
-func NewEndpoint(index *IndexInfo) (*Endpoint, error) {
+func NewEndpoint(index *IndexInfo, metaHeaders http.Header) (*Endpoint, error) {
// *TODO: Allow per-registry configuration of endpoints.
- endpoint, err := newEndpoint(index.GetAuthConfigKey(), index.Secure)
+ endpoint, err := newEndpoint(index.GetAuthConfigKey(), index.Secure, metaHeaders)
if err != nil {
return nil, err
}
@@ -83,7 +82,7 @@ func validateEndpoint(endpoint *Endpoint) error {
return nil
}
-func newEndpoint(address string, secure bool) (*Endpoint, error) {
+func newEndpoint(address string, secure bool, metaHeaders http.Header) (*Endpoint, error) {
var (
endpoint = new(Endpoint)
trimmedAddress string
@@ -100,15 +99,18 @@ func newEndpoint(address string, secure bool) (*Endpoint, error) {
return nil, err
}
endpoint.IsSecure = secure
+ tr := NewTransport(ConnectTimeout, endpoint.IsSecure)
+ endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(metaHeaders)...))
return endpoint, nil
}
-func (repoInfo *RepositoryInfo) GetEndpoint() (*Endpoint, error) {
- return NewEndpoint(repoInfo.Index)
+func (repoInfo *RepositoryInfo) GetEndpoint(metaHeaders http.Header) (*Endpoint, error) {
+ return NewEndpoint(repoInfo.Index, metaHeaders)
}
// Endpoint stores basic information about a registry endpoint.
type Endpoint struct {
+ client *http.Client
URL *url.URL
Version APIVersion
IsSecure bool
@@ -135,25 +137,24 @@ func (e *Endpoint) Path(path string) string {
func (e *Endpoint) Ping() (RegistryInfo, error) {
// The ping logic to use is determined by the registry endpoint version.
- factory := HTTPRequestFactory(nil)
switch e.Version {
case APIVersion1:
- return e.pingV1(factory)
+ return e.pingV1()
case APIVersion2:
- return e.pingV2(factory)
+ return e.pingV2()
}
// APIVersionUnknown
// We should try v2 first...
e.Version = APIVersion2
- regInfo, errV2 := e.pingV2(factory)
+ regInfo, errV2 := e.pingV2()
if errV2 == nil {
return regInfo, nil
}
// ... then fallback to v1.
e.Version = APIVersion1
- regInfo, errV1 := e.pingV1(factory)
+ regInfo, errV1 := e.pingV1()
if errV1 == nil {
return regInfo, nil
}
@@ -162,7 +163,7 @@ func (e *Endpoint) Ping() (RegistryInfo, error) {
return RegistryInfo{}, fmt.Errorf("unable to ping registry endpoint %s\nv2 ping attempt failed with error: %s\n v1 ping attempt failed with error: %s", e, errV2, errV1)
}
-func (e *Endpoint) pingV1(factory *requestdecorator.RequestFactory) (RegistryInfo, error) {
+func (e *Endpoint) pingV1() (RegistryInfo, error) {
logrus.Debugf("attempting v1 ping for registry endpoint %s", e)
if e.String() == IndexServerAddress() {
@@ -171,12 +172,12 @@ func (e *Endpoint) pingV1(factory *requestdecorator.RequestFactory) (RegistryInf
return RegistryInfo{Standalone: false}, nil
}
- req, err := factory.NewRequest("GET", e.Path("_ping"), nil)
+ req, err := http.NewRequest("GET", e.Path("_ping"), nil)
if err != nil {
return RegistryInfo{Standalone: false}, err
}
- resp, _, err := doRequest(req, nil, ConnectTimeout, e.IsSecure)
+ resp, err := e.client.Do(req)
if err != nil {
return RegistryInfo{Standalone: false}, err
}
@@ -216,15 +217,15 @@ func (e *Endpoint) pingV1(factory *requestdecorator.RequestFactory) (RegistryInf
return info, nil
}
-func (e *Endpoint) pingV2(factory *requestdecorator.RequestFactory) (RegistryInfo, error) {
+func (e *Endpoint) pingV2() (RegistryInfo, error) {
logrus.Debugf("attempting v2 ping for registry endpoint %s", e)
- req, err := factory.NewRequest("GET", e.Path(""), nil)
+ req, err := http.NewRequest("GET", e.Path(""), nil)
if err != nil {
return RegistryInfo{}, err
}
- resp, _, err := doRequest(req, nil, ConnectTimeout, e.IsSecure)
+ resp, err := e.client.Do(req)
if err != nil {
return RegistryInfo{}, err
}
@@ -263,20 +264,3 @@ HeaderLoop:
return RegistryInfo{}, fmt.Errorf("v2 registry endpoint returned status %d: %q", resp.StatusCode, http.StatusText(resp.StatusCode))
}
-
-func (e *Endpoint) HTTPClient() *http.Client {
- tlsConfig := tls.Config{
- MinVersion: tls.VersionTLS10,
- }
- if !e.IsSecure {
- tlsConfig.InsecureSkipVerify = true
- }
- return &http.Client{
- Transport: &http.Transport{
- DisableKeepAlives: true,
- Proxy: http.ProxyFromEnvironment,
- TLSClientConfig: &tlsConfig,
- },
- CheckRedirect: AddRequiredHeadersToRedirectedRequests,
- }
-}
diff --git a/registry/endpoint_test.go b/registry/endpoint_test.go
index 9567ba2352..6f67867bbb 100644
--- a/registry/endpoint_test.go
+++ b/registry/endpoint_test.go
@@ -19,7 +19,7 @@ func TestEndpointParse(t *testing.T) {
{"0.0.0.0:5000", "https://0.0.0.0:5000/v0/"},
}
for _, td := range testData {
- e, err := newEndpoint(td.str, false)
+ e, err := newEndpoint(td.str, false, nil)
if err != nil {
t.Errorf("%q: %s", td.str, err)
}
@@ -60,6 +60,7 @@ func TestValidateEndpointAmbiguousAPIVersion(t *testing.T) {
testEndpoint := Endpoint{
URL: testServerURL,
Version: APIVersionUnknown,
+ client: HTTPClient(NewTransport(ConnectTimeout, false)),
}
if err = validateEndpoint(&testEndpoint); err != nil {
diff --git a/registry/httpfactory.go b/registry/httpfactory.go
deleted file mode 100644
index f1b89e5829..0000000000
--- a/registry/httpfactory.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package registry
-
-import (
- "runtime"
-
- "github.com/docker/docker/autogen/dockerversion"
- "github.com/docker/docker/pkg/parsers/kernel"
- "github.com/docker/docker/pkg/requestdecorator"
-)
-
-func HTTPRequestFactory(metaHeaders map[string][]string) *requestdecorator.RequestFactory {
- // FIXME: this replicates the 'info' job.
- httpVersion := make([]requestdecorator.UAVersionInfo, 0, 4)
- httpVersion = append(httpVersion, requestdecorator.NewUAVersionInfo("docker", dockerversion.VERSION))
- httpVersion = append(httpVersion, requestdecorator.NewUAVersionInfo("go", runtime.Version()))
- httpVersion = append(httpVersion, requestdecorator.NewUAVersionInfo("git-commit", dockerversion.GITCOMMIT))
- if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
- httpVersion = append(httpVersion, requestdecorator.NewUAVersionInfo("kernel", kernelVersion.String()))
- }
- httpVersion = append(httpVersion, requestdecorator.NewUAVersionInfo("os", runtime.GOOS))
- httpVersion = append(httpVersion, requestdecorator.NewUAVersionInfo("arch", runtime.GOARCH))
- uad := &requestdecorator.UserAgentDecorator{
- Versions: httpVersion,
- }
- mhd := &requestdecorator.MetaHeadersDecorator{
- Headers: metaHeaders,
- }
- factory := requestdecorator.NewRequestFactory(uad, mhd)
- return factory
-}
diff --git a/registry/registry.go b/registry/registry.go
index 163e2de377..4436f135b9 100644
--- a/registry/registry.go
+++ b/registry/registry.go
@@ -8,13 +8,20 @@ import (
"io/ioutil"
"net"
"net/http"
+ "net/http/httputil"
"os"
"path"
+ "path/filepath"
+ "runtime"
"strings"
"time"
"github.com/Sirupsen/logrus"
+ "github.com/docker/docker/autogen/dockerversion"
+ "github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/timeoutconn"
+ "github.com/docker/docker/pkg/transport"
+ "github.com/docker/docker/pkg/useragent"
)
var (
@@ -31,29 +38,118 @@ const (
ConnectTimeout
)
-func newClient(jar http.CookieJar, roots *x509.CertPool, certs []tls.Certificate, timeout TimeoutType, secure bool) *http.Client {
- tlsConfig := tls.Config{
- RootCAs: roots,
+// dockerUserAgent is the User-Agent the Docker client uses to identify itself.
+// It is populated on init(), comprising version information of different components.
+var dockerUserAgent string
+
+func init() {
+ httpVersion := make([]useragent.VersionInfo, 0, 6)
+ httpVersion = append(httpVersion, useragent.VersionInfo{"docker", dockerversion.VERSION})
+ httpVersion = append(httpVersion, useragent.VersionInfo{"go", runtime.Version()})
+ httpVersion = append(httpVersion, useragent.VersionInfo{"git-commit", dockerversion.GITCOMMIT})
+ if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
+ httpVersion = append(httpVersion, useragent.VersionInfo{"kernel", kernelVersion.String()})
+ }
+ httpVersion = append(httpVersion, useragent.VersionInfo{"os", runtime.GOOS})
+ httpVersion = append(httpVersion, useragent.VersionInfo{"arch", runtime.GOARCH})
+
+ dockerUserAgent = useragent.AppendVersions("", httpVersion...)
+}
+
+type httpsRequestModifier struct{ tlsConfig *tls.Config }
+
+// DRAGONS(tiborvass): If someone wonders why do we set tlsconfig in a roundtrip,
+// it's because it's so as to match the current behavior in master: we generate the
+// certpool on every-goddam-request. It's not great, but it allows people to just put
+// the certs in /etc/docker/certs.d/.../ and let docker "pick it up" immediately. Would
+// prefer an fsnotify implementation, but that was out of scope of my refactoring.
+func (m *httpsRequestModifier) ModifyRequest(req *http.Request) error {
+ var (
+ roots *x509.CertPool
+ certs []tls.Certificate
+ hostDir string
+ )
+
+ if req.URL.Scheme == "https" {
+ hasFile := func(files []os.FileInfo, name string) bool {
+ for _, f := range files {
+ if f.Name() == name {
+ return true
+ }
+ }
+ return false
+ }
+
+ if runtime.GOOS == "windows" {
+ hostDir = path.Join(os.TempDir(), "/docker/certs.d", req.URL.Host)
+ } else {
+ hostDir = path.Join("/etc/docker/certs.d", req.URL.Host)
+ }
+ logrus.Debugf("hostDir: %s", hostDir)
+ fs, err := ioutil.ReadDir(hostDir)
+ if err != nil && !os.IsNotExist(err) {
+ return nil
+ }
+
+ for _, f := range fs {
+ if strings.HasSuffix(f.Name(), ".crt") {
+ if roots == nil {
+ roots = x509.NewCertPool()
+ }
+ logrus.Debugf("crt: %s", hostDir+"/"+f.Name())
+ data, err := ioutil.ReadFile(filepath.Join(hostDir, f.Name()))
+ if err != nil {
+ return err
+ }
+ roots.AppendCertsFromPEM(data)
+ }
+ if strings.HasSuffix(f.Name(), ".cert") {
+ certName := f.Name()
+ keyName := certName[:len(certName)-5] + ".key"
+ logrus.Debugf("cert: %s", hostDir+"/"+f.Name())
+ if !hasFile(fs, keyName) {
+ return fmt.Errorf("Missing key %s for certificate %s", keyName, certName)
+ }
+ cert, err := tls.LoadX509KeyPair(filepath.Join(hostDir, certName), path.Join(hostDir, keyName))
+ if err != nil {
+ return err
+ }
+ certs = append(certs, cert)
+ }
+ if strings.HasSuffix(f.Name(), ".key") {
+ keyName := f.Name()
+ certName := keyName[:len(keyName)-4] + ".cert"
+ logrus.Debugf("key: %s", hostDir+"/"+f.Name())
+ if !hasFile(fs, certName) {
+ return fmt.Errorf("Missing certificate %s for key %s", certName, keyName)
+ }
+ }
+ }
+ m.tlsConfig.RootCAs = roots
+ m.tlsConfig.Certificates = certs
+ }
+ return nil
+}
+
+func NewTransport(timeout TimeoutType, secure bool) http.RoundTripper {
+ tlsConfig := &tls.Config{
// Avoid fallback to SSL protocols < TLS1.0
- MinVersion: tls.VersionTLS10,
- Certificates: certs,
+ MinVersion: tls.VersionTLS10,
+ InsecureSkipVerify: !secure,
}
- if !secure {
- tlsConfig.InsecureSkipVerify = true
- }
-
- httpTransport := &http.Transport{
+ tr := &http.Transport{
DisableKeepAlives: true,
Proxy: http.ProxyFromEnvironment,
- TLSClientConfig: &tlsConfig,
+ TLSClientConfig: tlsConfig,
}
switch timeout {
case ConnectTimeout:
- httpTransport.Dial = func(proto string, addr string) (net.Conn, error) {
- // Set the connect timeout to 5 seconds
- d := net.Dialer{Timeout: 5 * time.Second, DualStack: true}
+ tr.Dial = func(proto string, addr string) (net.Conn, error) {
+ // Set the connect timeout to 30 seconds to allow for slower connection
+ // times...
+ d := net.Dialer{Timeout: 30 * time.Second, DualStack: true}
conn, err := d.Dial(proto, addr)
if err != nil {
@@ -64,7 +160,7 @@ func newClient(jar http.CookieJar, roots *x509.CertPool, certs []tls.Certificate
return conn, nil
}
case ReceiveTimeout:
- httpTransport.Dial = func(proto string, addr string) (net.Conn, error) {
+ tr.Dial = func(proto string, addr string) (net.Conn, error) {
d := net.Dialer{DualStack: true}
conn, err := d.Dial(proto, addr)
@@ -76,84 +172,57 @@ func newClient(jar http.CookieJar, roots *x509.CertPool, certs []tls.Certificate
}
}
- return &http.Client{
- Transport: httpTransport,
- CheckRedirect: AddRequiredHeadersToRedirectedRequests,
- Jar: jar,
+ if secure {
+ // note: httpsTransport also handles http transport
+ // but for HTTPS, it sets up the certs
+ return transport.NewTransport(tr, &httpsRequestModifier{tlsConfig})
}
+
+ return tr
}
-func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType, secure bool) (*http.Response, *http.Client, error) {
- var (
- pool *x509.CertPool
- certs []tls.Certificate
- )
+// DockerHeaders returns request modifiers that ensure requests have
+// the User-Agent header set to dockerUserAgent and that metaHeaders
+// are added.
+func DockerHeaders(metaHeaders http.Header) []transport.RequestModifier {
+ modifiers := []transport.RequestModifier{
+ transport.NewHeaderRequestModifier(http.Header{"User-Agent": []string{dockerUserAgent}}),
+ }
+ if metaHeaders != nil {
+ modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders))
+ }
+ return modifiers
+}
- if secure && req.URL.Scheme == "https" {
- hasFile := func(files []os.FileInfo, name string) bool {
- for _, f := range files {
- if f.Name() == name {
- return true
- }
- }
- return false
- }
+type debugTransport struct{ http.RoundTripper }
- hostDir := path.Join("/etc/docker/certs.d", req.URL.Host)
- logrus.Debugf("hostDir: %s", hostDir)
- fs, err := ioutil.ReadDir(hostDir)
- if err != nil && !os.IsNotExist(err) {
- return nil, nil, err
- }
+func (tr debugTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ dump, err := httputil.DumpRequestOut(req, false)
+ if err != nil {
+ fmt.Println("could not dump request")
+ }
+ fmt.Println(string(dump))
+ resp, err := tr.RoundTripper.RoundTrip(req)
+ if err != nil {
+ return nil, err
+ }
+ dump, err = httputil.DumpResponse(resp, false)
+ if err != nil {
+ fmt.Println("could not dump response")
+ }
+ fmt.Println(string(dump))
+ return resp, err
+}
- for _, f := range fs {
- if strings.HasSuffix(f.Name(), ".crt") {
- if pool == nil {
- pool = x509.NewCertPool()
- }
- logrus.Debugf("crt: %s", hostDir+"/"+f.Name())
- data, err := ioutil.ReadFile(path.Join(hostDir, f.Name()))
- if err != nil {
- return nil, nil, err
- }
- pool.AppendCertsFromPEM(data)
- }
- if strings.HasSuffix(f.Name(), ".cert") {
- certName := f.Name()
- keyName := certName[:len(certName)-5] + ".key"
- logrus.Debugf("cert: %s", hostDir+"/"+f.Name())
- if !hasFile(fs, keyName) {
- return nil, nil, fmt.Errorf("Missing key %s for certificate %s", keyName, certName)
- }
- cert, err := tls.LoadX509KeyPair(path.Join(hostDir, certName), path.Join(hostDir, keyName))
- if err != nil {
- return nil, nil, err
- }
- certs = append(certs, cert)
- }
- if strings.HasSuffix(f.Name(), ".key") {
- keyName := f.Name()
- certName := keyName[:len(keyName)-4] + ".cert"
- logrus.Debugf("key: %s", hostDir+"/"+f.Name())
- if !hasFile(fs, certName) {
- return nil, nil, fmt.Errorf("Missing certificate %s for key %s", certName, keyName)
- }
- }
- }
+func HTTPClient(transport http.RoundTripper) *http.Client {
+ if transport == nil {
+ transport = NewTransport(ConnectTimeout, true)
}
- if len(certs) == 0 {
- client := newClient(jar, pool, nil, timeout, secure)
- res, err := client.Do(req)
- if err != nil {
- return nil, nil, err
- }
- return res, client, nil
+ return &http.Client{
+ Transport: transport,
+ CheckRedirect: AddRequiredHeadersToRedirectedRequests,
}
-
- client := newClient(jar, pool, certs, timeout, secure)
- res, err := client.Do(req)
- return res, client, err
}
func trustedLocation(req *http.Request) bool {
diff --git a/registry/registry_test.go b/registry/registry_test.go
index 3f63eb6e25..33e86ff43a 100644
--- a/registry/registry_test.go
+++ b/registry/registry_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"github.com/docker/docker/cliconfig"
- "github.com/docker/docker/pkg/requestdecorator"
+ "github.com/docker/docker/pkg/transport"
)
var (
@@ -22,45 +22,34 @@ const (
func spawnTestRegistrySession(t *testing.T) *Session {
authConfig := &cliconfig.AuthConfig{}
- endpoint, err := NewEndpoint(makeIndex("/v1/"))
+ endpoint, err := NewEndpoint(makeIndex("/v1/"), nil)
if err != nil {
t.Fatal(err)
}
- r, err := NewSession(authConfig, requestdecorator.NewRequestFactory(), endpoint, true)
+ var tr http.RoundTripper = debugTransport{NewTransport(ReceiveTimeout, endpoint.IsSecure)}
+ tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(nil)...)
+ client := HTTPClient(tr)
+ r, err := NewSession(client, authConfig, endpoint)
if err != nil {
t.Fatal(err)
}
+ // In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true`
+ // header while authenticating, in order to retrieve a token that can be later used to
+ // perform authenticated actions.
+ //
+ // The mock v1 registry does not support that, (TODO(tiborvass): support it), instead,
+ // it will consider authenticated any request with the header `X-Docker-Token: fake-token`.
+ //
+ // Because we know that the client's transport is an `*authTransport` we simply cast it,
+ // in order to set the internal cached token to the fake token, and thus send that fake token
+ // upon every subsequent requests.
+ r.client.Transport.(*authTransport).token = token
return r
}
-func TestPublicSession(t *testing.T) {
- authConfig := &cliconfig.AuthConfig{}
-
- getSessionDecorators := func(index *IndexInfo) int {
- endpoint, err := NewEndpoint(index)
- if err != nil {
- t.Fatal(err)
- }
- r, err := NewSession(authConfig, requestdecorator.NewRequestFactory(), endpoint, true)
- if err != nil {
- t.Fatal(err)
- }
- return len(r.reqFactory.GetDecorators())
- }
-
- decorators := getSessionDecorators(makeIndex("/v1/"))
- assertEqual(t, decorators, 0, "Expected no decorator on http session")
-
- decorators = getSessionDecorators(makeHttpsIndex("/v1/"))
- assertNotEqual(t, decorators, 0, "Expected decorator on https session")
-
- decorators = getSessionDecorators(makePublicIndex())
- assertEqual(t, decorators, 0, "Expected no decorator on public session")
-}
-
func TestPingRegistryEndpoint(t *testing.T) {
testPing := func(index *IndexInfo, expectedStandalone bool, assertMessage string) {
- ep, err := NewEndpoint(index)
+ ep, err := NewEndpoint(index, nil)
if err != nil {
t.Fatal(err)
}
@@ -80,7 +69,7 @@ func TestPingRegistryEndpoint(t *testing.T) {
func TestEndpoint(t *testing.T) {
// Simple wrapper to fail test if err != nil
expandEndpoint := func(index *IndexInfo) *Endpoint {
- endpoint, err := NewEndpoint(index)
+ endpoint, err := NewEndpoint(index, nil)
if err != nil {
t.Fatal(err)
}
@@ -89,7 +78,7 @@ func TestEndpoint(t *testing.T) {
assertInsecureIndex := func(index *IndexInfo) {
index.Secure = true
- _, err := NewEndpoint(index)
+ _, err := NewEndpoint(index, nil)
assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index")
assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index")
index.Secure = false
@@ -97,7 +86,7 @@ func TestEndpoint(t *testing.T) {
assertSecureIndex := func(index *IndexInfo) {
index.Secure = true
- _, err := NewEndpoint(index)
+ _, err := NewEndpoint(index, nil)
assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index")
assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index")
index.Secure = false
@@ -163,14 +152,14 @@ func TestEndpoint(t *testing.T) {
}
for _, address := range badEndpoints {
index.Name = address
- _, err := NewEndpoint(index)
+ _, err := NewEndpoint(index, nil)
checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint")
}
}
func TestGetRemoteHistory(t *testing.T) {
r := spawnTestRegistrySession(t)
- hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/"), token)
+ hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/"))
if err != nil {
t.Fatal(err)
}
@@ -182,16 +171,16 @@ func TestGetRemoteHistory(t *testing.T) {
func TestLookupRemoteImage(t *testing.T) {
r := spawnTestRegistrySession(t)
- err := r.LookupRemoteImage(imageID, makeURL("/v1/"), token)
+ err := r.LookupRemoteImage(imageID, makeURL("/v1/"))
assertEqual(t, err, nil, "Expected error of remote lookup to nil")
- if err := r.LookupRemoteImage("abcdef", makeURL("/v1/"), token); err == nil {
+ if err := r.LookupRemoteImage("abcdef", makeURL("/v1/")); err == nil {
t.Fatal("Expected error of remote lookup to not nil")
}
}
func TestGetRemoteImageJSON(t *testing.T) {
r := spawnTestRegistrySession(t)
- json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/"), token)
+ json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/"))
if err != nil {
t.Fatal(err)
}
@@ -200,7 +189,7 @@ func TestGetRemoteImageJSON(t *testing.T) {
t.Fatal("Expected non-empty json")
}
- _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"), token)
+ _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"))
if err == nil {
t.Fatal("Expected image not found error")
}
@@ -208,7 +197,7 @@ func TestGetRemoteImageJSON(t *testing.T) {
func TestGetRemoteImageLayer(t *testing.T) {
r := spawnTestRegistrySession(t)
- data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), token, 0)
+ data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), 0)
if err != nil {
t.Fatal(err)
}
@@ -216,7 +205,7 @@ func TestGetRemoteImageLayer(t *testing.T) {
t.Fatal("Expected non-nil data result")
}
- _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), token, 0)
+ _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), 0)
if err == nil {
t.Fatal("Expected image not found error")
}
@@ -224,14 +213,14 @@ func TestGetRemoteImageLayer(t *testing.T) {
func TestGetRemoteTags(t *testing.T) {
r := spawnTestRegistrySession(t)
- tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO, token)
+ tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO)
if err != nil {
t.Fatal(err)
}
assertEqual(t, len(tags), 1, "Expected one tag")
assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID)
- _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz", token)
+ _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz")
if err == nil {
t.Fatal("Expected error when fetching tags for bogus repo")
}
@@ -265,7 +254,7 @@ func TestPushImageJSONRegistry(t *testing.T) {
Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37",
}
- err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"), token)
+ err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"))
if err != nil {
t.Fatal(err)
}
@@ -274,7 +263,7 @@ func TestPushImageJSONRegistry(t *testing.T) {
func TestPushImageLayerRegistry(t *testing.T) {
r := spawnTestRegistrySession(t)
layer := strings.NewReader("")
- _, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), token, []byte{})
+ _, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), []byte{})
if err != nil {
t.Fatal(err)
}
@@ -299,6 +288,9 @@ func TestValidateRepositoryName(t *testing.T) {
invalidRepoNames := []string{
"https://github.com/docker/docker",
"docker/Docker",
+ "-docker",
+ "-docker/docker",
+ "-docker.io/docker/docker",
"docker///docker",
"docker.io/docker/Docker",
"docker.io/docker///docker",
@@ -691,7 +683,7 @@ func TestNewIndexInfo(t *testing.T) {
func TestPushRegistryTag(t *testing.T) {
r := spawnTestRegistrySession(t)
- err := r.PushRegistryTag("foo42/bar", imageID, "stable", makeURL("/v1/"), token)
+ err := r.PushRegistryTag("foo42/bar", imageID, "stable", makeURL("/v1/"))
if err != nil {
t.Fatal(err)
}
diff --git a/registry/service.go b/registry/service.go
index 87fc1d076f..6811749272 100644
--- a/registry/service.go
+++ b/registry/service.go
@@ -1,6 +1,10 @@
package registry
-import "github.com/docker/docker/cliconfig"
+import (
+ "net/http"
+
+ "github.com/docker/docker/cliconfig"
+)
type Service struct {
Config *ServiceConfig
@@ -27,12 +31,12 @@ func (s *Service) Auth(authConfig *cliconfig.AuthConfig) (string, error) {
if err != nil {
return "", err
}
- endpoint, err := NewEndpoint(index)
+ endpoint, err := NewEndpoint(index, nil)
if err != nil {
return "", err
}
authConfig.ServerAddress = endpoint.String()
- return Login(authConfig, endpoint, HTTPRequestFactory(nil))
+ return Login(authConfig, endpoint)
}
// Search queries the public registry for images matching the specified
@@ -42,12 +46,13 @@ func (s *Service) Search(term string, authConfig *cliconfig.AuthConfig, headers
if err != nil {
return nil, err
}
+
// *TODO: Search multiple indexes.
- endpoint, err := repoInfo.GetEndpoint()
+ endpoint, err := repoInfo.GetEndpoint(http.Header(headers))
if err != nil {
return nil, err
}
- r, err := NewSession(authConfig, HTTPRequestFactory(headers), endpoint, true)
+ r, err := NewSession(endpoint.client, authConfig, endpoint)
if err != nil {
return nil, err
}
diff --git a/registry/session.go b/registry/session.go
index e65f82cd61..71b27bef96 100644
--- a/registry/session.go
+++ b/registry/session.go
@@ -3,6 +3,8 @@ package registry
import (
"bytes"
"crypto/sha256"
+ "errors"
+ "sync"
// this is required for some certificates
_ "crypto/sha512"
"encoding/hex"
@@ -20,64 +22,140 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/pkg/httputils"
- "github.com/docker/docker/pkg/requestdecorator"
"github.com/docker/docker/pkg/tarsum"
+ "github.com/docker/docker/pkg/transport"
)
type Session struct {
- authConfig *cliconfig.AuthConfig
- reqFactory *requestdecorator.RequestFactory
indexEndpoint *Endpoint
- jar *cookiejar.Jar
- timeout TimeoutType
+ client *http.Client
+ // TODO(tiborvass): remove authConfig
+ authConfig *cliconfig.AuthConfig
}
-func NewSession(authConfig *cliconfig.AuthConfig, factory *requestdecorator.RequestFactory, endpoint *Endpoint, timeout bool) (r *Session, err error) {
+type authTransport struct {
+ http.RoundTripper
+ *cliconfig.AuthConfig
+
+ alwaysSetBasicAuth bool
+ token []string
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// AuthTransport handles the auth layer when communicating with a v1 registry (private or official)
+//
+// For private v1 registries, set alwaysSetBasicAuth to true.
+//
+// For the official v1 registry, if there isn't already an Authorization header in the request,
+// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header.
+// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing
+// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent
+// requests.
+//
+// If the server sends a token without the client having requested it, it is ignored.
+//
+// This RoundTripper also has a CancelRequest method important for correct timeout handling.
+func AuthTransport(base http.RoundTripper, authConfig *cliconfig.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper {
+ if base == nil {
+ base = http.DefaultTransport
+ }
+ return &authTransport{
+ RoundTripper: base,
+ AuthConfig: authConfig,
+ alwaysSetBasicAuth: alwaysSetBasicAuth,
+ modReq: make(map[*http.Request]*http.Request),
+ }
+}
+
+func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) {
+ req := transport.CloneRequest(orig)
+ tr.mu.Lock()
+ tr.modReq[orig] = req
+ tr.mu.Unlock()
+
+ if tr.alwaysSetBasicAuth {
+ req.SetBasicAuth(tr.Username, tr.Password)
+ return tr.RoundTripper.RoundTrip(req)
+ }
+
+ // Don't override
+ if req.Header.Get("Authorization") == "" {
+ if req.Header.Get("X-Docker-Token") == "true" && len(tr.Username) > 0 {
+ req.SetBasicAuth(tr.Username, tr.Password)
+ } else if len(tr.token) > 0 {
+ req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ","))
+ }
+ }
+ resp, err := tr.RoundTripper.RoundTrip(req)
+ if err != nil {
+ delete(tr.modReq, orig)
+ return nil, err
+ }
+ if len(resp.Header["X-Docker-Token"]) > 0 {
+ tr.token = resp.Header["X-Docker-Token"]
+ }
+ resp.Body = &transport.OnEOFReader{
+ Rc: resp.Body,
+ Fn: func() { delete(tr.modReq, orig) },
+ }
+ return resp, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (tr *authTransport) CancelRequest(req *http.Request) {
+ type canceler interface {
+ CancelRequest(*http.Request)
+ }
+ if cr, ok := tr.RoundTripper.(canceler); ok {
+ tr.mu.Lock()
+ modReq := tr.modReq[req]
+ delete(tr.modReq, req)
+ tr.mu.Unlock()
+ cr.CancelRequest(modReq)
+ }
+}
+
+// TODO(tiborvass): remove authConfig param once registry client v2 is vendored
+func NewSession(client *http.Client, authConfig *cliconfig.AuthConfig, endpoint *Endpoint) (r *Session, err error) {
r = &Session{
authConfig: authConfig,
+ client: client,
indexEndpoint: endpoint,
}
- if timeout {
- r.timeout = ReceiveTimeout
- }
-
- r.jar, err = cookiejar.New(nil)
- if err != nil {
- return nil, err
- }
+ var alwaysSetBasicAuth bool
// If we're working with a standalone private registry over HTTPS, send Basic Auth headers
- // alongside our requests.
- if r.indexEndpoint.VersionString(1) != IndexServerAddress() && r.indexEndpoint.URL.Scheme == "https" {
- info, err := r.indexEndpoint.Ping()
+ // alongside all our requests.
+ if endpoint.VersionString(1) != IndexServerAddress() && endpoint.URL.Scheme == "https" {
+ info, err := endpoint.Ping()
if err != nil {
return nil, err
}
- if info.Standalone && authConfig != nil && factory != nil {
- logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", r.indexEndpoint.String())
- dec := requestdecorator.NewAuthDecorator(authConfig.Username, authConfig.Password)
- factory.AddDecorator(dec)
+
+ if info.Standalone && authConfig != nil {
+ logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String())
+ alwaysSetBasicAuth = true
}
}
- r.reqFactory = factory
- return r, nil
-}
+ client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth)
-func (r *Session) doRequest(req *http.Request) (*http.Response, *http.Client, error) {
- return doRequest(req, r.jar, r.timeout, r.indexEndpoint.IsSecure)
+ jar, err := cookiejar.New(nil)
+ if err != nil {
+ return nil, errors.New("cookiejar.New is not supposed to return an error")
+ }
+ client.Jar = jar
+
+ return r, nil
}
// Retrieve the history of a given image from the Registry.
// Return a list of the parent's json (requested image included)
-func (r *Session) GetRemoteHistory(imgID, registry string, token []string) ([]string, error) {
- req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/ancestry", nil)
- if err != nil {
- return nil, err
- }
- setTokenAuth(req, token)
- res, _, err := r.doRequest(req)
+func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) {
+ res, err := r.client.Get(registry + "images/" + imgID + "/ancestry")
if err != nil {
return nil, err
}
@@ -89,27 +167,18 @@ func (r *Session) GetRemoteHistory(imgID, registry string, token []string) ([]st
return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res)
}
- jsonString, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return nil, fmt.Errorf("Error while reading the http response: %s", err)
+ var history []string
+ if err := json.NewDecoder(res.Body).Decode(&history); err != nil {
+ return nil, fmt.Errorf("Error while reading the http response: %v", err)
}
- logrus.Debugf("Ancestry: %s", jsonString)
- history := new([]string)
- if err := json.Unmarshal(jsonString, history); err != nil {
- return nil, err
- }
- return *history, nil
+ logrus.Debugf("Ancestry: %v", history)
+ return history, nil
}
// Check if an image exists in the Registry
-func (r *Session) LookupRemoteImage(imgID, registry string, token []string) error {
- req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
- if err != nil {
- return err
- }
- setTokenAuth(req, token)
- res, _, err := r.doRequest(req)
+func (r *Session) LookupRemoteImage(imgID, registry string) error {
+ res, err := r.client.Get(registry + "images/" + imgID + "/json")
if err != nil {
return err
}
@@ -121,14 +190,8 @@ func (r *Session) LookupRemoteImage(imgID, registry string, token []string) erro
}
// Retrieve an image from the Registry.
-func (r *Session) GetRemoteImageJSON(imgID, registry string, token []string) ([]byte, int, error) {
- // Get the JSON
- req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
- if err != nil {
- return nil, -1, fmt.Errorf("Failed to download json: %s", err)
- }
- setTokenAuth(req, token)
- res, _, err := r.doRequest(req)
+func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int, error) {
+ res, err := r.client.Get(registry + "images/" + imgID + "/json")
if err != nil {
return nil, -1, fmt.Errorf("Failed to download json: %s", err)
}
@@ -147,44 +210,44 @@ func (r *Session) GetRemoteImageJSON(imgID, registry string, token []string) ([]
jsonString, err := ioutil.ReadAll(res.Body)
if err != nil {
- return nil, -1, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString)
+ return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString)
}
return jsonString, imageSize, nil
}
-func (r *Session) GetRemoteImageLayer(imgID, registry string, token []string, imgSize int64) (io.ReadCloser, error) {
+func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) {
var (
retries = 5
statusCode = 0
- client *http.Client
res *http.Response
+ err error
imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID)
)
- req, err := r.reqFactory.NewRequest("GET", imageURL, nil)
+ req, err := http.NewRequest("GET", imageURL, nil)
if err != nil {
- return nil, fmt.Errorf("Error while getting from the server: %s\n", err)
+ return nil, fmt.Errorf("Error while getting from the server: %v", err)
}
- setTokenAuth(req, token)
+ // TODO: why are we doing retries at this level?
+ // These retries should be generic to both v1 and v2
for i := 1; i <= retries; i++ {
statusCode = 0
- res, client, err = r.doRequest(req)
- if err != nil {
- logrus.Debugf("Error contacting registry: %s", err)
- if res != nil {
- if res.Body != nil {
- res.Body.Close()
- }
- statusCode = res.StatusCode
- }
- if i == retries {
- return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)",
- statusCode, imgID)
- }
- time.Sleep(time.Duration(i) * 5 * time.Second)
- continue
+ res, err = r.client.Do(req)
+ if err == nil {
+ break
}
- break
+ logrus.Debugf("Error contacting registry %s: %v", registry, err)
+ if res != nil {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ statusCode = res.StatusCode
+ }
+ if i == retries {
+ return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)",
+ statusCode, imgID)
+ }
+ time.Sleep(time.Duration(i) * 5 * time.Second)
}
if res.StatusCode != 200 {
@@ -195,13 +258,13 @@ func (r *Session) GetRemoteImageLayer(imgID, registry string, token []string, im
if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 {
logrus.Debugf("server supports resume")
- return httputils.ResumableRequestReaderWithInitialResponse(client, req, 5, imgSize, res), nil
+ return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil
}
logrus.Debugf("server doesn't support resume")
return res.Body, nil
}
-func (r *Session) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) {
+func (r *Session) GetRemoteTags(registries []string, repository string) (map[string]string, error) {
if strings.Count(repository, "/") == 0 {
// This will be removed once the Registry supports auto-resolution on
// the "library" namespace
@@ -209,13 +272,7 @@ func (r *Session) GetRemoteTags(registries []string, repository string, token []
}
for _, host := range registries {
endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository)
- req, err := r.reqFactory.NewRequest("GET", endpoint, nil)
-
- if err != nil {
- return nil, err
- }
- setTokenAuth(req, token)
- res, _, err := r.doRequest(req)
+ res, err := r.client.Get(endpoint)
if err != nil {
return nil, err
}
@@ -263,16 +320,13 @@ func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) {
logrus.Debugf("[registry] Calling GET %s", repositoryTarget)
- req, err := r.reqFactory.NewRequest("GET", repositoryTarget, nil)
+ req, err := http.NewRequest("GET", repositoryTarget, nil)
if err != nil {
return nil, err
}
- if r.authConfig != nil && len(r.authConfig.Username) > 0 {
- req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
- }
+ // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
req.Header.Set("X-Docker-Token", "true")
-
- res, _, err := r.doRequest(req)
+ res, err := r.client.Do(req)
if err != nil {
return nil, err
}
@@ -292,11 +346,6 @@ func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) {
return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, remote, errBody), res)
}
- var tokens []string
- if res.Header.Get("X-Docker-Token") != "" {
- tokens = res.Header["X-Docker-Token"]
- }
-
var endpoints []string
if res.Header.Get("X-Docker-Endpoints") != "" {
endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1))
@@ -322,29 +371,29 @@ func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) {
return &RepositoryData{
ImgList: imgsData,
Endpoints: endpoints,
- Tokens: tokens,
}, nil
}
-func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string, token []string) error {
+func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error {
- logrus.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/checksum")
+ u := registry + "images/" + imgData.ID + "/checksum"
- req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/checksum", nil)
+ logrus.Debugf("[registry] Calling PUT %s", u)
+
+ req, err := http.NewRequest("PUT", u, nil)
if err != nil {
return err
}
- setTokenAuth(req, token)
req.Header.Set("X-Docker-Checksum", imgData.Checksum)
req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload)
- res, _, err := r.doRequest(req)
+ res, err := r.client.Do(req)
if err != nil {
- return fmt.Errorf("Failed to upload metadata: %s", err)
+ return fmt.Errorf("Failed to upload metadata: %v", err)
}
defer res.Body.Close()
if len(res.Cookies()) > 0 {
- r.jar.SetCookies(req.URL, res.Cookies())
+ r.client.Jar.SetCookies(req.URL, res.Cookies())
}
if res.StatusCode != 200 {
errBody, err := ioutil.ReadAll(res.Body)
@@ -363,18 +412,19 @@ func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string, t
}
// Push a local image to the registry
-func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string, token []string) error {
+func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error {
- logrus.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/json")
+ u := registry + "images/" + imgData.ID + "/json"
- req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/json", bytes.NewReader(jsonRaw))
+ logrus.Debugf("[registry] Calling PUT %s", u)
+
+ req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw))
if err != nil {
return err
}
req.Header.Add("Content-type", "application/json")
- setTokenAuth(req, token)
- res, _, err := r.doRequest(req)
+ res, err := r.client.Do(req)
if err != nil {
return fmt.Errorf("Failed to upload metadata: %s", err)
}
@@ -398,9 +448,11 @@ func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regist
return nil
}
-func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, token []string, jsonRaw []byte) (checksum string, checksumPayload string, err error) {
+func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) {
- logrus.Debugf("[registry] Calling PUT %s", registry+"images/"+imgID+"/layer")
+ u := registry + "images/" + imgID + "/layer"
+
+ logrus.Debugf("[registry] Calling PUT %s", u)
tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0)
if err != nil {
@@ -411,17 +463,16 @@ func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry
h.Write([]byte{'\n'})
checksumLayer := io.TeeReader(tarsumLayer, h)
- req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", checksumLayer)
+ req, err := http.NewRequest("PUT", u, checksumLayer)
if err != nil {
return "", "", err
}
req.Header.Add("Content-Type", "application/octet-stream")
req.ContentLength = -1
req.TransferEncoding = []string{"chunked"}
- setTokenAuth(req, token)
- res, _, err := r.doRequest(req)
+ res, err := r.client.Do(req)
if err != nil {
- return "", "", fmt.Errorf("Failed to upload layer: %s", err)
+ return "", "", fmt.Errorf("Failed to upload layer: %v", err)
}
if rc, ok := layer.(io.Closer); ok {
if err := rc.Close(); err != nil {
@@ -444,19 +495,18 @@ func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry
// push a tag on the registry.
// Remote has the format '/
-func (r *Session) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
+func (r *Session) PushRegistryTag(remote, revision, tag, registry string) error {
// "jsonify" the string
revision = "\"" + revision + "\""
path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag)
- req, err := r.reqFactory.NewRequest("PUT", registry+path, strings.NewReader(revision))
+ req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision))
if err != nil {
return err
}
req.Header.Add("Content-type", "application/json")
- setTokenAuth(req, token)
req.ContentLength = int64(len(revision))
- res, _, err := r.doRequest(req)
+ res, err := r.client.Do(req)
if err != nil {
return err
}
@@ -491,7 +541,8 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate
logrus.Debugf("[registry] PUT %s", u)
logrus.Debugf("Image list pushed to index:\n%s", imgListJSON)
headers := map[string][]string{
- "Content-type": {"application/json"},
+ "Content-type": {"application/json"},
+ // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
"X-Docker-Token": {"true"},
}
if validate {
@@ -526,9 +577,6 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate
}
return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote, errBody), res)
}
- if res.Header.Get("X-Docker-Token") == "" {
- return nil, fmt.Errorf("Index response didn't contain an access token")
- }
tokens = res.Header["X-Docker-Token"]
logrus.Debugf("Auth token: %v", tokens)
@@ -539,8 +587,7 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate
if err != nil {
return nil, err
}
- }
- if validate {
+ } else {
if res.StatusCode != 204 {
errBody, err := ioutil.ReadAll(res.Body)
if err != nil {
@@ -551,22 +598,20 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate
}
return &RepositoryData{
- Tokens: tokens,
Endpoints: endpoints,
}, nil
}
func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) {
- req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(body))
+ req, err := http.NewRequest("PUT", u, bytes.NewReader(body))
if err != nil {
return nil, err
}
- req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
req.ContentLength = int64(len(body))
for k, v := range headers {
req.Header[k] = v
}
- response, _, err := r.doRequest(req)
+ response, err := r.client.Do(req)
if err != nil {
return nil, err
}
@@ -580,15 +625,7 @@ func shouldRedirect(response *http.Response) bool {
func (r *Session) SearchRepositories(term string) (*SearchResults, error) {
logrus.Debugf("Index server: %s", r.indexEndpoint)
u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term)
- req, err := r.reqFactory.NewRequest("GET", u, nil)
- if err != nil {
- return nil, err
- }
- if r.authConfig != nil && len(r.authConfig.Username) > 0 {
- req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
- }
- req.Header.Set("X-Docker-Token", "true")
- res, _, err := r.doRequest(req)
+ res, err := r.client.Get(u)
if err != nil {
return nil, err
}
@@ -600,6 +637,7 @@ func (r *Session) SearchRepositories(term string) (*SearchResults, error) {
return result, json.NewDecoder(res.Body).Decode(result)
}
+// TODO(tiborvass): remove this once registry client v2 is vendored
func (r *Session) GetAuthConfig(withPasswd bool) *cliconfig.AuthConfig {
password := ""
if withPasswd {
@@ -611,9 +649,3 @@ func (r *Session) GetAuthConfig(withPasswd bool) *cliconfig.AuthConfig {
Email: r.authConfig.Email,
}
}
-
-func setTokenAuth(req *http.Request, token []string) {
- if req.Header.Get("Authorization") == "" { // Don't override
- req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
- }
-}
diff --git a/registry/session_v2.go b/registry/session_v2.go
index 4188e505bd..43d638c798 100644
--- a/registry/session_v2.go
+++ b/registry/session_v2.go
@@ -27,7 +27,7 @@ func getV2Builder(e *Endpoint) *v2.URLBuilder {
func (r *Session) V2RegistryEndpoint(index *IndexInfo) (ep *Endpoint, err error) {
// TODO check if should use Mirror
if index.Official {
- ep, err = newEndpoint(REGISTRYSERVER, true)
+ ep, err = newEndpoint(REGISTRYSERVER, true, nil)
if err != nil {
return
}
@@ -38,7 +38,7 @@ func (r *Session) V2RegistryEndpoint(index *IndexInfo) (ep *Endpoint, err error)
} else if r.indexEndpoint.String() == index.GetAuthConfigKey() {
ep = r.indexEndpoint
} else {
- ep, err = NewEndpoint(index)
+ ep, err = NewEndpoint(index, nil)
if err != nil {
return
}
@@ -49,7 +49,7 @@ func (r *Session) V2RegistryEndpoint(index *IndexInfo) (ep *Endpoint, err error)
}
// GetV2Authorization gets the authorization needed to the given image
-// If readonly access is requested, then only the authorization may
+// If readonly access is requested, then the authorization may
// only be used for Get operations.
func (r *Session) GetV2Authorization(ep *Endpoint, imageName string, readOnly bool) (auth *RequestAuthorization, err error) {
scopes := []string{"pull"}
@@ -77,14 +77,14 @@ func (r *Session) GetV2ImageManifest(ep *Endpoint, imageName, tagName string, au
method := "GET"
logrus.Debugf("[registry] Calling %q %s", method, routeURL)
- req, err := r.reqFactory.NewRequest(method, routeURL, nil)
+ req, err := http.NewRequest(method, routeURL, nil)
if err != nil {
return nil, "", err
}
if err := auth.Authorize(req); err != nil {
return nil, "", err
}
- res, _, err := r.doRequest(req)
+ res, err := r.client.Do(req)
if err != nil {
return nil, "", err
}
@@ -118,14 +118,14 @@ func (r *Session) HeadV2ImageBlob(ep *Endpoint, imageName string, dgst digest.Di
method := "HEAD"
logrus.Debugf("[registry] Calling %q %s", method, routeURL)
- req, err := r.reqFactory.NewRequest(method, routeURL, nil)
+ req, err := http.NewRequest(method, routeURL, nil)
if err != nil {
return false, err
}
if err := auth.Authorize(req); err != nil {
return false, err
}
- res, _, err := r.doRequest(req)
+ res, err := r.client.Do(req)
if err != nil {
return false, err
}
@@ -152,14 +152,14 @@ func (r *Session) GetV2ImageBlob(ep *Endpoint, imageName string, dgst digest.Dig
method := "GET"
logrus.Debugf("[registry] Calling %q %s", method, routeURL)
- req, err := r.reqFactory.NewRequest(method, routeURL, nil)
+ req, err := http.NewRequest(method, routeURL, nil)
if err != nil {
return err
}
if err := auth.Authorize(req); err != nil {
return err
}
- res, _, err := r.doRequest(req)
+ res, err := r.client.Do(req)
if err != nil {
return err
}
@@ -183,14 +183,14 @@ func (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName string, dgst dige
method := "GET"
logrus.Debugf("[registry] Calling %q %s", method, routeURL)
- req, err := r.reqFactory.NewRequest(method, routeURL, nil)
+ req, err := http.NewRequest(method, routeURL, nil)
if err != nil {
return nil, 0, err
}
if err := auth.Authorize(req); err != nil {
return nil, 0, err
}
- res, _, err := r.doRequest(req)
+ res, err := r.client.Do(req)
if err != nil {
return nil, 0, err
}
@@ -220,7 +220,7 @@ func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName string, dgst digest.Dig
method := "PUT"
logrus.Debugf("[registry] Calling %q %s", method, location)
- req, err := r.reqFactory.NewRequest(method, location, ioutil.NopCloser(blobRdr))
+ req, err := http.NewRequest(method, location, ioutil.NopCloser(blobRdr))
if err != nil {
return err
}
@@ -230,7 +230,7 @@ func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName string, dgst digest.Dig
if err := auth.Authorize(req); err != nil {
return err
}
- res, _, err := r.doRequest(req)
+ res, err := r.client.Do(req)
if err != nil {
return err
}
@@ -259,7 +259,7 @@ func (r *Session) initiateBlobUpload(ep *Endpoint, imageName string, auth *Reque
}
logrus.Debugf("[registry] Calling %q %s", "POST", routeURL)
- req, err := r.reqFactory.NewRequest("POST", routeURL, nil)
+ req, err := http.NewRequest("POST", routeURL, nil)
if err != nil {
return "", err
}
@@ -267,7 +267,7 @@ func (r *Session) initiateBlobUpload(ep *Endpoint, imageName string, auth *Reque
if err := auth.Authorize(req); err != nil {
return "", err
}
- res, _, err := r.doRequest(req)
+ res, err := r.client.Do(req)
if err != nil {
return "", err
}
@@ -305,14 +305,14 @@ func (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, si
method := "PUT"
logrus.Debugf("[registry] Calling %q %s", method, routeURL)
- req, err := r.reqFactory.NewRequest(method, routeURL, bytes.NewReader(signedManifest))
+ req, err := http.NewRequest(method, routeURL, bytes.NewReader(signedManifest))
if err != nil {
return "", err
}
if err := auth.Authorize(req); err != nil {
return "", err
}
- res, _, err := r.doRequest(req)
+ res, err := r.client.Do(req)
if err != nil {
return "", err
}
@@ -366,14 +366,14 @@ func (r *Session) GetV2RemoteTags(ep *Endpoint, imageName string, auth *RequestA
method := "GET"
logrus.Debugf("[registry] Calling %q %s", method, routeURL)
- req, err := r.reqFactory.NewRequest(method, routeURL, nil)
+ req, err := http.NewRequest(method, routeURL, nil)
if err != nil {
return nil, err
}
if err := auth.Authorize(req); err != nil {
return nil, err
}
- res, _, err := r.doRequest(req)
+ res, err := r.client.Do(req)
if err != nil {
return nil, err
}
diff --git a/registry/token.go b/registry/token.go
index b03bd891bb..e27cb6f528 100644
--- a/registry/token.go
+++ b/registry/token.go
@@ -7,15 +7,13 @@ import (
"net/http"
"net/url"
"strings"
-
- "github.com/docker/docker/pkg/requestdecorator"
)
type tokenResponse struct {
Token string `json:"token"`
}
-func getToken(username, password string, params map[string]string, registryEndpoint *Endpoint, client *http.Client, factory *requestdecorator.RequestFactory) (token string, err error) {
+func getToken(username, password string, params map[string]string, registryEndpoint *Endpoint) (token string, err error) {
realm, ok := params["realm"]
if !ok {
return "", errors.New("no realm specified for token auth challenge")
@@ -34,7 +32,7 @@ func getToken(username, password string, params map[string]string, registryEndpo
}
}
- req, err := factory.NewRequest("GET", realmURL.String(), nil)
+ req, err := http.NewRequest("GET", realmURL.String(), nil)
if err != nil {
return "", err
}
@@ -58,7 +56,7 @@ func getToken(username, password string, params map[string]string, registryEndpo
req.URL.RawQuery = reqParams.Encode()
- resp, err := client.Do(req)
+ resp, err := registryEndpoint.client.Do(req)
if err != nil {
return "", err
}
diff --git a/runconfig/config.go b/runconfig/config.go
index 844958be2c..13d7189569 100644
--- a/runconfig/config.go
+++ b/runconfig/config.go
@@ -3,6 +3,7 @@ package runconfig
import (
"encoding/json"
"io"
+ "strings"
"github.com/docker/docker/nat"
)
@@ -59,6 +60,10 @@ type Command struct {
parts []string
}
+func (e *Command) ToString() string {
+ return strings.Join(e.parts, " ")
+}
+
func (e *Command) MarshalJSON() ([]byte, error) {
if e == nil {
return []byte{}, nil
@@ -117,6 +122,7 @@ type Config struct {
Cmd *Command
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
Volumes map[string]struct{}
+ VolumeDriver string
WorkingDir string
Entrypoint *Entrypoint
NetworkDisabled bool
diff --git a/runconfig/config_test.go b/runconfig/config_test.go
index 87fc6c6aac..8b1a49f11b 100644
--- a/runconfig/config_test.go
+++ b/runconfig/config_test.go
@@ -45,13 +45,6 @@ func TestParseRunLinks(t *testing.T) {
if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
}
-
- if _, _, err := parse(t, "--link a"); err == nil {
- t.Fatalf("Error parsing links. `--link a` should be an error but is not")
- }
- if _, _, err := parse(t, "--link"); err == nil {
- t.Fatalf("Error parsing links. `--link` should be an error but is not")
- }
}
func TestParseRunAttach(t *testing.T) {
diff --git a/runconfig/exec.go b/runconfig/exec.go
index 8fe05be1bb..781cb3550f 100644
--- a/runconfig/exec.go
+++ b/runconfig/exec.go
@@ -18,13 +18,12 @@ type ExecConfig struct {
func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) {
var (
- flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached")
- flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY")
- flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background")
- flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])")
- flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to the command")
- execCmd []string
- container string
+ flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached")
+ flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY")
+ flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background")
+ flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])")
+ execCmd []string
+ container string
)
cmd.Require(flag.Min, 2)
if err := cmd.ParseFlags(args, true); err != nil {
@@ -35,12 +34,13 @@ func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) {
execCmd = parsedArgs[1:]
execConfig := &ExecConfig{
- User: *flUser,
- Privileged: *flPrivileged,
- Tty: *flTty,
- Cmd: execCmd,
- Container: container,
- Detach: *flDetach,
+ User: *flUser,
+ // TODO(vishh): Expose 'Privileged' once it is supported.
+ // + //Privileged: job.GetenvBool("Privileged"),
+ Tty: *flTty,
+ Cmd: execCmd,
+ Container: container,
+ Detach: *flDetach,
}
// If -d is not set, attach to everything by default
diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go
index 171671b6ef..1418dea4a8 100644
--- a/runconfig/hostconfig.go
+++ b/runconfig/hostconfig.go
@@ -18,7 +18,11 @@ type NetworkMode string
// IsPrivate indicates whether container use it's private network stack
func (n NetworkMode) IsPrivate() bool {
- return !(n.IsHost() || n.IsContainer() || n.IsNone())
+ return !(n.IsHost() || n.IsContainer())
+}
+
+func (n NetworkMode) IsBridge() bool {
+ return n == "bridge"
}
func (n NetworkMode) IsHost() bool {
@@ -72,6 +76,27 @@ func (n IpcMode) Container() string {
return ""
}
+type UTSMode string
+
+// IsPrivate indicates whether container use it's private UTS namespace
+func (n UTSMode) IsPrivate() bool {
+ return !(n.IsHost())
+}
+
+func (n UTSMode) IsHost() bool {
+ return n == "host"
+}
+
+func (n UTSMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ default:
+ return false
+ }
+ return true
+}
+
type PidMode string
// IsPrivate indicates whether container use it's private pid stack
@@ -104,6 +129,18 @@ type RestartPolicy struct {
MaximumRetryCount int
}
+func (rp *RestartPolicy) IsNone() bool {
+ return rp.Name == "no"
+}
+
+func (rp *RestartPolicy) IsAlways() bool {
+ return rp.Name == "always"
+}
+
+func (rp *RestartPolicy) IsOnFailure() bool {
+ return rp.Name == "on-failure"
+}
+
type LogConfig struct {
Type string
Config map[string]string
@@ -162,12 +199,15 @@ type HostConfig struct {
Binds []string
ContainerIDFile string
LxcConf *LxcConfig
- Memory int64 // Memory limit (in bytes)
- MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap
- CpuShares int64 // CPU shares (relative weight vs. other containers)
+ Memory int64 // Memory limit (in bytes)
+ MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap
+ CpuShares int64 // CPU shares (relative weight vs. other containers)
+ CpuPeriod int64
CpusetCpus string // CpusetCpus 0-2, 0,1
CpusetMems string // CpusetMems 0-2, 0,1
CpuQuota int64
+ BlkioWeight int64 // Block IO weight (relative weight vs. other containers)
+ OomKillDisable bool // Whether to disable OOM Killer or not
Privileged bool
PortBindings nat.PortMap
Links []string
@@ -180,6 +220,7 @@ type HostConfig struct {
NetworkMode NetworkMode
IpcMode IpcMode
PidMode PidMode
+ UTSMode UTSMode
CapAdd []string
CapDrop []string
RestartPolicy RestartPolicy
diff --git a/runconfig/parse.go b/runconfig/parse.go
index 47feac866a..46ec267851 100644
--- a/runconfig/parse.go
+++ b/runconfig/parse.go
@@ -2,7 +2,6 @@ package runconfig
import (
"fmt"
- "path"
"strconv"
"strings"
@@ -15,12 +14,12 @@ import (
)
var (
- ErrInvalidWorkingDirectory = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.")
- ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: --net=container can't be used with links. This would result in undefined behavior.")
- ErrConflictContainerNetworkAndDns = fmt.Errorf("Conflicting options: --net=container can't be used with --dns. This configuration is invalid.")
+ ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: --net=container can't be used with links. This would result in undefined behavior")
+ ErrConflictNetworkAndDns = fmt.Errorf("Conflicting options: --dns and the network mode (--net)")
ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: -h and the network mode (--net)")
- ErrConflictHostNetworkAndDns = fmt.Errorf("Conflicting options: --net=host can't be used with --dns. This configuration is invalid.")
- ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: --net=host can't be used with links. This would result in undefined behavior.")
+ ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: --net=host can't be used with links. This would result in undefined behavior")
+ ErrConflictContainerNetworkAndMac = fmt.Errorf("Conflicting options: --mac-address and the network mode (--net)")
+ ErrConflictNetworkHosts = fmt.Errorf("Conflicting options: --add-host and the network mode (--net)")
)
func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSet, error) {
@@ -48,13 +47,16 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
flCapDrop = opts.NewListOpts(nil)
flSecurityOpt = opts.NewListOpts(nil)
flLabelsFile = opts.NewListOpts(nil)
+ flLoggingOpts = opts.NewListOpts(nil)
flNetwork = cmd.Bool([]string{"#n", "#-networking"}, true, "Enable networking for this container")
flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container")
flPidMode = cmd.String([]string{"-pid"}, "", "PID namespace to use")
+ flUTSMode = cmd.String([]string{"-uts"}, "", "UTS namespace to use")
flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to random ports")
flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached")
flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY")
+ flOomKillDisable = cmd.Bool([]string{"-oom-kill-disable"}, false, "Disable OOM Killer")
flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file")
flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default ENTRYPOINT of the image")
flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name")
@@ -63,9 +65,11 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])")
flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container")
flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
+ flCpuPeriod = cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period")
flCpusetCpus = cmd.String([]string{"#-cpuset", "-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
flCpusetMems = cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
- flCpuQuota = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
+ flCpuQuota = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS quota")
+ flBlkioWeight = cmd.Int64([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000")
flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container")
flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)")
flIpcMode = cmd.String([]string{"-ipc"}, "", "IPC namespace to use")
@@ -94,6 +98,9 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
cmd.Var(&flCapDrop, []string{"-cap-drop"}, "Drop Linux capabilities")
cmd.Var(&flSecurityOpt, []string{"-security-opt"}, "Security Options")
cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options")
+ cmd.Var(&flLoggingOpts, []string{"-log-opt"}, "Log driver options")
+
+ expFlags := attachExperimentalFlags(cmd)
cmd.Require(flag.Min, 1)
@@ -101,9 +108,39 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
return nil, nil, cmd, err
}
- // Validate input params
- if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
- return nil, nil, cmd, ErrInvalidWorkingDirectory
+ var (
+ attachStdin = flAttach.Get("stdin")
+ attachStdout = flAttach.Get("stdout")
+ attachStderr = flAttach.Get("stderr")
+ )
+
+ netMode, err := parseNetMode(*flNetMode)
+ if err != nil {
+ return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err)
+ }
+
+ if (netMode.IsHost() || netMode.IsContainer()) && *flHostname != "" {
+ return nil, nil, cmd, ErrConflictNetworkHostname
+ }
+
+ if netMode.IsHost() && flLinks.Len() > 0 {
+ return nil, nil, cmd, ErrConflictHostNetworkAndLinks
+ }
+
+ if netMode.IsContainer() && flLinks.Len() > 0 {
+ return nil, nil, cmd, ErrConflictContainerNetworkAndLinks
+ }
+
+ if (netMode.IsHost() || netMode.IsContainer()) && flDns.Len() > 0 {
+ return nil, nil, cmd, ErrConflictNetworkAndDns
+ }
+
+ if (netMode.IsContainer() || netMode.IsHost()) && flExtraHosts.Len() > 0 {
+ return nil, nil, cmd, ErrConflictNetworkHosts
+ }
+
+ if (netMode.IsContainer() || netMode.IsHost()) && *flMacAddress != "" {
+ return nil, nil, cmd, ErrConflictContainerNetworkAndMac
}
// Validate the input mac address
@@ -112,31 +149,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
return nil, nil, cmd, fmt.Errorf("%s is not a valid mac address", *flMacAddress)
}
}
- var (
- attachStdin = flAttach.Get("stdin")
- attachStdout = flAttach.Get("stdout")
- attachStderr = flAttach.Get("stderr")
- )
-
- if *flNetMode != "bridge" && *flNetMode != "none" && *flHostname != "" {
- return nil, nil, cmd, ErrConflictNetworkHostname
- }
-
- if *flNetMode == "host" && flLinks.Len() > 0 {
- return nil, nil, cmd, ErrConflictHostNetworkAndLinks
- }
-
- if strings.HasPrefix(*flNetMode, "container") && flLinks.Len() > 0 {
- return nil, nil, cmd, ErrConflictContainerNetworkAndLinks
- }
-
- if *flNetMode == "host" && flDns.Len() > 0 {
- return nil, nil, cmd, ErrConflictHostNetworkAndDns
- }
-
- if strings.HasPrefix(*flNetMode, "container") && flDns.Len() > 0 {
- return nil, nil, cmd, ErrConflictContainerNetworkAndDns
- }
// If neither -d or -a are set, attach to everything by default
if flAttach.Len() == 0 {
@@ -272,9 +284,9 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
return nil, nil, cmd, fmt.Errorf("--pid: invalid PID mode")
}
- netMode, err := parseNetMode(*flNetMode)
- if err != nil {
- return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err)
+ utsMode := UTSMode(*flUTSMode)
+ if !utsMode.Valid() {
+ return nil, nil, cmd, fmt.Errorf("--uts: invalid UTS mode")
}
restartPolicy, err := ParseRestartPolicy(*flRestartPolicy)
@@ -282,6 +294,11 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
return nil, nil, cmd, err
}
+ loggingOpts, err := parseLoggingOpts(*flLoggingDriver, flLoggingOpts.GetAll())
+ if err != nil {
+ return nil, nil, cmd, err
+ }
+
config := &Config{
Hostname: hostname,
Domainname: domainname,
@@ -311,9 +328,12 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
Memory: flMemory,
MemorySwap: MemorySwap,
CpuShares: *flCpuShares,
+ CpuPeriod: *flCpuPeriod,
CpusetCpus: *flCpusetCpus,
CpusetMems: *flCpusetMems,
CpuQuota: *flCpuQuota,
+ BlkioWeight: *flBlkioWeight,
+ OomKillDisable: *flOomKillDisable,
Privileged: *flPrivileged,
PortBindings: portBindings,
Links: flLinks.GetAll(),
@@ -325,6 +345,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
NetworkMode: netMode,
IpcMode: ipcMode,
PidMode: pidMode,
+ UTSMode: utsMode,
Devices: deviceMappings,
CapAdd: flCapAdd.GetAll(),
CapDrop: flCapDrop.GetAll(),
@@ -332,10 +353,12 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
SecurityOpt: flSecurityOpt.GetAll(),
ReadonlyRootfs: *flReadonlyRootfs,
Ulimits: flUlimits.GetList(),
- LogConfig: LogConfig{Type: *flLoggingDriver},
+ LogConfig: LogConfig{Type: *flLoggingDriver, Config: loggingOpts},
CgroupParent: *flCgroupParent,
}
+ applyExperimentalFlags(expFlags, config, hostConfig)
+
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true
@@ -374,6 +397,15 @@ func convertKVStringsToMap(values []string) map[string]string {
return result
}
+func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) {
+ loggingOptsMap := convertKVStringsToMap(loggingOpts)
+ if loggingDriver == "none" && len(loggingOpts) > 0 {
+ return map[string]string{}, fmt.Errorf("Invalid logging opts for driver %s", loggingDriver)
+ }
+ //TODO - validation step
+ return loggingOptsMap, nil
+}
+
// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect
func ParseRestartPolicy(policy string) (RestartPolicy, error) {
p := RestartPolicy{}
diff --git a/runconfig/parse_experimental.go b/runconfig/parse_experimental.go
new file mode 100644
index 0000000000..886b377fa8
--- /dev/null
+++ b/runconfig/parse_experimental.go
@@ -0,0 +1,19 @@
+// +build experimental
+
+package runconfig
+
+import flag "github.com/docker/docker/pkg/mflag"
+
+type experimentalFlags struct {
+ flags map[string]interface{}
+}
+
+func attachExperimentalFlags(cmd *flag.FlagSet) *experimentalFlags {
+ flags := make(map[string]interface{})
+ flags["volume-driver"] = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container")
+ return &experimentalFlags{flags: flags}
+}
+
+func applyExperimentalFlags(exp *experimentalFlags, config *Config, hostConfig *HostConfig) {
+ config.VolumeDriver = *(exp.flags["volume-driver"]).(*string)
+}
diff --git a/runconfig/parse_stub.go b/runconfig/parse_stub.go
new file mode 100644
index 0000000000..391b6ed43b
--- /dev/null
+++ b/runconfig/parse_stub.go
@@ -0,0 +1,14 @@
+// +build !experimental
+
+package runconfig
+
+import flag "github.com/docker/docker/pkg/mflag"
+
+type experimentalFlags struct{}
+
+func attachExperimentalFlags(cmd *flag.FlagSet) *experimentalFlags {
+ return nil
+}
+
+func applyExperimentalFlags(flags *experimentalFlags, config *Config, hostConfig *HostConfig) {
+}
diff --git a/utils/experimental.go b/utils/experimental.go
new file mode 100644
index 0000000000..b308a59faf
--- /dev/null
+++ b/utils/experimental.go
@@ -0,0 +1,7 @@
+// +build experimental
+
+package utils
+
+func ExperimentalBuild() bool {
+ return true
+}
diff --git a/utils/git.go b/utils/git.go
index 18e002d184..ce8924d8af 100644
--- a/utils/git.go
+++ b/utils/git.go
@@ -4,7 +4,10 @@ import (
"fmt"
"io/ioutil"
"net/http"
+ "net/url"
+ "os"
"os/exec"
+ "path/filepath"
"strings"
"github.com/docker/docker/pkg/urlutil"
@@ -19,20 +22,26 @@ func GitClone(remoteURL string) (string, error) {
return "", err
}
- clone := cloneArgs(remoteURL, root)
+ u, err := url.Parse(remoteURL)
+ if err != nil {
+ return "", err
+ }
- if output, err := exec.Command("git", clone...).CombinedOutput(); err != nil {
+ fragment := u.Fragment
+ clone := cloneArgs(u, root)
+
+ if output, err := git(clone...); err != nil {
return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output)
}
- return root, nil
+ return checkoutGit(fragment, root)
}
-func cloneArgs(remoteURL, root string) []string {
+func cloneArgs(remoteURL *url.URL, root string) []string {
args := []string{"clone", "--recursive"}
- shallow := true
+ shallow := len(remoteURL.Fragment) == 0
- if strings.HasPrefix(remoteURL, "http") {
+ if shallow && strings.HasPrefix(remoteURL.Scheme, "http") {
res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL))
if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" {
shallow = false
@@ -43,5 +52,42 @@ func cloneArgs(remoteURL, root string) []string {
args = append(args, "--depth", "1")
}
- return append(args, remoteURL, root)
+ if remoteURL.Fragment != "" {
+ remoteURL.Fragment = ""
+ }
+
+ return append(args, remoteURL.String(), root)
+}
+
+func checkoutGit(fragment, root string) (string, error) {
+ refAndDir := strings.SplitN(fragment, ":", 2)
+
+ if len(refAndDir[0]) != 0 {
+ if output, err := gitWithinDir(root, "checkout", refAndDir[0]); err != nil {
+ return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output)
+ }
+ }
+
+ if len(refAndDir) > 1 && len(refAndDir[1]) != 0 {
+ newCtx := filepath.Join(root, refAndDir[1])
+ fi, err := os.Stat(newCtx)
+ if err != nil {
+ return "", err
+ }
+ if !fi.IsDir() {
+ return "", fmt.Errorf("Error setting git context, not a directory: %s", newCtx)
+ }
+ root = newCtx
+ }
+
+ return root, nil
+}
+
+func gitWithinDir(dir string, args ...string) ([]byte, error) {
+ a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")}
+ return git(append(a, args...)...)
+}
+
+func git(args ...string) ([]byte, error) {
+ return exec.Command("git", args...).CombinedOutput()
}
diff --git a/utils/git_test.go b/utils/git_test.go
index a82841ae11..10b13e9627 100644
--- a/utils/git_test.go
+++ b/utils/git_test.go
@@ -2,9 +2,12 @@ package utils
import (
"fmt"
+ "io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
+ "os"
+ "path/filepath"
"reflect"
"testing"
)
@@ -22,7 +25,7 @@ func TestCloneArgsSmartHttp(t *testing.T) {
w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q))
})
- args := cloneArgs(gitURL, "/tmp")
+ args := cloneArgs(serverURL, "/tmp")
exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"}
if !reflect.DeepEqual(args, exp) {
t.Fatalf("Expected %v, got %v", exp, args)
@@ -41,16 +44,132 @@ func TestCloneArgsDumbHttp(t *testing.T) {
w.Header().Set("Content-Type", "text/plain")
})
- args := cloneArgs(gitURL, "/tmp")
+ args := cloneArgs(serverURL, "/tmp")
exp := []string{"clone", "--recursive", gitURL, "/tmp"}
if !reflect.DeepEqual(args, exp) {
t.Fatalf("Expected %v, got %v", exp, args)
}
}
+
func TestCloneArgsGit(t *testing.T) {
- args := cloneArgs("git://github.com/docker/docker", "/tmp")
+ u, _ := url.Parse("git://github.com/docker/docker")
+ args := cloneArgs(u, "/tmp")
exp := []string{"clone", "--recursive", "--depth", "1", "git://github.com/docker/docker", "/tmp"}
if !reflect.DeepEqual(args, exp) {
t.Fatalf("Expected %v, got %v", exp, args)
}
}
+
+func TestCloneArgsStripFragment(t *testing.T) {
+ u, _ := url.Parse("git://github.com/docker/docker#test")
+ args := cloneArgs(u, "/tmp")
+ exp := []string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"}
+ if !reflect.DeepEqual(args, exp) {
+ t.Fatalf("Expected %v, got %v", exp, args)
+ }
+}
+
+func TestCheckoutGit(t *testing.T) {
+ root, err := ioutil.TempDir("", "docker-build-git-checkout")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(root)
+
+ gitDir := filepath.Join(root, "repo")
+ _, err = git("init", gitDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com"); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test"); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ subDir := filepath.Join(gitDir, "subdir")
+ if err = os.Mkdir(subDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "commit", "-am", "First commit"); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "checkout", "-b", "test"); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit"); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "checkout", "master"); err != nil {
+ t.Fatal(err)
+ }
+
+ cases := []struct {
+ frag string
+ exp string
+ fail bool
+ }{
+ {"", "FROM scratch", false},
+ {"master", "FROM scratch", false},
+ {":subdir", "FROM scratch\nEXPOSE 5000", false},
+ {":nosubdir", "", true}, // missing directory error
+ {":Dockerfile", "", true}, // not a directory error
+ {"master:nosubdir", "", true},
+ {"master:subdir", "FROM scratch\nEXPOSE 5000", false},
+ {"test", "FROM scratch\nEXPOSE 3000", false},
+ {"test:", "FROM scratch\nEXPOSE 3000", false},
+ {"test:subdir", "FROM busybox\nEXPOSE 5000", false},
+ }
+
+ for _, c := range cases {
+ r, err := checkoutGit(c.frag, gitDir)
+
+ fail := err != nil
+ if fail != c.fail {
+ t.Fatalf("Expected %v failure, error was %v\n", c.fail, err)
+ }
+ if c.fail {
+ continue
+ }
+
+ b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(b) != c.exp {
+ t.Fatalf("Expected %v, was %v\n", c.exp, string(b))
+ }
+ }
+}
diff --git a/utils/stubs.go b/utils/stubs.go
new file mode 100644
index 0000000000..b376f0cfb5
--- /dev/null
+++ b/utils/stubs.go
@@ -0,0 +1,7 @@
+// +build !experimental
+
+package utils
+
+func ExperimentalBuild() bool {
+ return false
+}
diff --git a/utils/tcp.go b/utils/tcp.go
new file mode 100644
index 0000000000..75980ff69a
--- /dev/null
+++ b/utils/tcp.go
@@ -0,0 +1,22 @@
+package utils
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+func ConfigureTCPTransport(tr *http.Transport, proto, addr string) {
+ // Why 32? See https://github.com/docker/docker/pull/8035.
+ timeout := 32 * time.Second
+ if proto == "unix" {
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ tr.Dial = func(_, _ string) (net.Conn, error) {
+ return net.DialTimeout(proto, addr, timeout)
+ }
+ } else {
+ tr.Proxy = http.ProxyFromEnvironment
+ tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
+ }
+}
diff --git a/utils/utils.go b/utils/utils.go
index 05dfb757a3..cb1b7b34cc 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -7,18 +7,15 @@ import (
"fmt"
"io"
"io/ioutil"
- "net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
- "sync"
"github.com/docker/docker/autogen/dockerversion"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/fileutils"
- "github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/stringid"
)
@@ -123,47 +120,6 @@ func DockerInitPath(localCopy string) string {
return ""
}
-// FIXME: move to httputils? ioutils?
-type WriteFlusher struct {
- sync.Mutex
- w io.Writer
- flusher http.Flusher
- flushed bool
-}
-
-func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
- wf.Lock()
- defer wf.Unlock()
- n, err = wf.w.Write(b)
- wf.flushed = true
- wf.flusher.Flush()
- return n, err
-}
-
-// Flush the stream immediately.
-func (wf *WriteFlusher) Flush() {
- wf.Lock()
- defer wf.Unlock()
- wf.flushed = true
- wf.flusher.Flush()
-}
-
-func (wf *WriteFlusher) Flushed() bool {
- wf.Lock()
- defer wf.Unlock()
- return wf.flushed
-}
-
-func NewWriteFlusher(w io.Writer) *WriteFlusher {
- var flusher http.Flusher
- if f, ok := w.(http.Flusher); ok {
- flusher = f
- } else {
- flusher = &ioutils.NopFlusher{}
- }
- return &WriteFlusher{w: w, flusher: flusher}
-}
-
var globalTestID string
// TestDirectory creates a new temporary directory and returns its path.
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go
deleted file mode 100644
index e363aa793e..0000000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package tar implements access to tar archives.
-// It aims to cover most of the variations, including those produced
-// by GNU and BSD tars.
-//
-// References:
-// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
-// http://www.gnu.org/software/tar/manual/html_node/Standard.html
-// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
-package tar
-
-import (
- "bytes"
- "errors"
- "fmt"
- "os"
- "path"
- "time"
-)
-
-const (
- blockSize = 512
-
- // Types
- TypeReg = '0' // regular file
- TypeRegA = '\x00' // regular file
- TypeLink = '1' // hard link
- TypeSymlink = '2' // symbolic link
- TypeChar = '3' // character device node
- TypeBlock = '4' // block device node
- TypeDir = '5' // directory
- TypeFifo = '6' // fifo node
- TypeCont = '7' // reserved
- TypeXHeader = 'x' // extended header
- TypeXGlobalHeader = 'g' // global extended header
- TypeGNULongName = 'L' // Next file has a long name
- TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
- TypeGNUSparse = 'S' // sparse file
-)
-
-// A Header represents a single header in a tar archive.
-// Some fields may not be populated.
-type Header struct {
- Name string // name of header file entry
- Mode int64 // permission and mode bits
- Uid int // user id of owner
- Gid int // group id of owner
- Size int64 // length in bytes
- ModTime time.Time // modified time
- Typeflag byte // type of header entry
- Linkname string // target name of link
- Uname string // user name of owner
- Gname string // group name of owner
- Devmajor int64 // major number of character or block device
- Devminor int64 // minor number of character or block device
- AccessTime time.Time // access time
- ChangeTime time.Time // status change time
- Xattrs map[string]string
-}
-
-// File name constants from the tar spec.
-const (
- fileNameSize = 100 // Maximum number of bytes in a standard tar name.
- fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
-)
-
-// FileInfo returns an os.FileInfo for the Header.
-func (h *Header) FileInfo() os.FileInfo {
- return headerFileInfo{h}
-}
-
-// headerFileInfo implements os.FileInfo.
-type headerFileInfo struct {
- h *Header
-}
-
-func (fi headerFileInfo) Size() int64 { return fi.h.Size }
-func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
-func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
-func (fi headerFileInfo) Sys() interface{} { return fi.h }
-
-// Name returns the base name of the file.
-func (fi headerFileInfo) Name() string {
- if fi.IsDir() {
- return path.Base(path.Clean(fi.h.Name))
- }
- return path.Base(fi.h.Name)
-}
-
-// Mode returns the permission and mode bits for the headerFileInfo.
-func (fi headerFileInfo) Mode() (mode os.FileMode) {
- // Set file permission bits.
- mode = os.FileMode(fi.h.Mode).Perm()
-
- // Set setuid, setgid and sticky bits.
- if fi.h.Mode&c_ISUID != 0 {
- // setuid
- mode |= os.ModeSetuid
- }
- if fi.h.Mode&c_ISGID != 0 {
- // setgid
- mode |= os.ModeSetgid
- }
- if fi.h.Mode&c_ISVTX != 0 {
- // sticky
- mode |= os.ModeSticky
- }
-
- // Set file mode bits.
- // clear perm, setuid, setgid and sticky bits.
- m := os.FileMode(fi.h.Mode) &^ 07777
- if m == c_ISDIR {
- // directory
- mode |= os.ModeDir
- }
- if m == c_ISFIFO {
- // named pipe (FIFO)
- mode |= os.ModeNamedPipe
- }
- if m == c_ISLNK {
- // symbolic link
- mode |= os.ModeSymlink
- }
- if m == c_ISBLK {
- // device file
- mode |= os.ModeDevice
- }
- if m == c_ISCHR {
- // Unix character device
- mode |= os.ModeDevice
- mode |= os.ModeCharDevice
- }
- if m == c_ISSOCK {
- // Unix domain socket
- mode |= os.ModeSocket
- }
-
- switch fi.h.Typeflag {
- case TypeLink, TypeSymlink:
- // hard link, symbolic link
- mode |= os.ModeSymlink
- case TypeChar:
- // character device node
- mode |= os.ModeDevice
- mode |= os.ModeCharDevice
- case TypeBlock:
- // block device node
- mode |= os.ModeDevice
- case TypeDir:
- // directory
- mode |= os.ModeDir
- case TypeFifo:
- // fifo node
- mode |= os.ModeNamedPipe
- }
-
- return mode
-}
-
-// sysStat, if non-nil, populates h from system-dependent fields of fi.
-var sysStat func(fi os.FileInfo, h *Header) error
-
-// Mode constants from the tar spec.
-const (
- c_ISUID = 04000 // Set uid
- c_ISGID = 02000 // Set gid
- c_ISVTX = 01000 // Save text (sticky bit)
- c_ISDIR = 040000 // Directory
- c_ISFIFO = 010000 // FIFO
- c_ISREG = 0100000 // Regular file
- c_ISLNK = 0120000 // Symbolic link
- c_ISBLK = 060000 // Block special file
- c_ISCHR = 020000 // Character special file
- c_ISSOCK = 0140000 // Socket
-)
-
-// Keywords for the PAX Extended Header
-const (
- paxAtime = "atime"
- paxCharset = "charset"
- paxComment = "comment"
- paxCtime = "ctime" // please note that ctime is not a valid pax header.
- paxGid = "gid"
- paxGname = "gname"
- paxLinkpath = "linkpath"
- paxMtime = "mtime"
- paxPath = "path"
- paxSize = "size"
- paxUid = "uid"
- paxUname = "uname"
- paxXattr = "SCHILY.xattr."
- paxNone = ""
-)
-
-// FileInfoHeader creates a partially-populated Header from fi.
-// If fi describes a symlink, FileInfoHeader records link as the link target.
-// If fi describes a directory, a slash is appended to the name.
-// Because os.FileInfo's Name method returns only the base name of
-// the file it describes, it may be necessary to modify the Name field
-// of the returned header to provide the full path name of the file.
-func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
- if fi == nil {
- return nil, errors.New("tar: FileInfo is nil")
- }
- fm := fi.Mode()
- h := &Header{
- Name: fi.Name(),
- ModTime: fi.ModTime(),
- Mode: int64(fm.Perm()), // or'd with c_IS* constants later
- }
- switch {
- case fm.IsRegular():
- h.Mode |= c_ISREG
- h.Typeflag = TypeReg
- h.Size = fi.Size()
- case fi.IsDir():
- h.Typeflag = TypeDir
- h.Mode |= c_ISDIR
- h.Name += "/"
- case fm&os.ModeSymlink != 0:
- h.Typeflag = TypeSymlink
- h.Mode |= c_ISLNK
- h.Linkname = link
- case fm&os.ModeDevice != 0:
- if fm&os.ModeCharDevice != 0 {
- h.Mode |= c_ISCHR
- h.Typeflag = TypeChar
- } else {
- h.Mode |= c_ISBLK
- h.Typeflag = TypeBlock
- }
- case fm&os.ModeNamedPipe != 0:
- h.Typeflag = TypeFifo
- h.Mode |= c_ISFIFO
- case fm&os.ModeSocket != 0:
- h.Mode |= c_ISSOCK
- default:
- return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
- }
- if fm&os.ModeSetuid != 0 {
- h.Mode |= c_ISUID
- }
- if fm&os.ModeSetgid != 0 {
- h.Mode |= c_ISGID
- }
- if fm&os.ModeSticky != 0 {
- h.Mode |= c_ISVTX
- }
- if sysStat != nil {
- return h, sysStat(fi, h)
- }
- return h, nil
-}
-
-var zeroBlock = make([]byte, blockSize)
-
-// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
-// We compute and return both.
-func checksum(header []byte) (unsigned int64, signed int64) {
- for i := 0; i < len(header); i++ {
- if i == 148 {
- // The chksum field (header[148:156]) is special: it should be treated as space bytes.
- unsigned += ' ' * 8
- signed += ' ' * 8
- i += 7
- continue
- }
- unsigned += int64(header[i])
- signed += int64(int8(header[i]))
- }
- return
-}
-
-type slicer []byte
-
-func (sp *slicer) next(n int) (b []byte) {
- s := *sp
- b, *sp = s[0:n], s[n:]
- return
-}
-
-func isASCII(s string) bool {
- for _, c := range s {
- if c >= 0x80 {
- return false
- }
- }
- return true
-}
-
-func toASCII(s string) string {
- if isASCII(s) {
- return s
- }
- var buf bytes.Buffer
- for _, c := range s {
- if c < 0x80 {
- buf.WriteByte(byte(c))
- }
- }
- return buf.String()
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go
deleted file mode 100644
index 351eaa0e6c..0000000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar_test
-
-import (
- "archive/tar"
- "bytes"
- "fmt"
- "io"
- "log"
- "os"
-)
-
-func Example() {
- // Create a buffer to write our archive to.
- buf := new(bytes.Buffer)
-
- // Create a new tar archive.
- tw := tar.NewWriter(buf)
-
- // Add some files to the archive.
- var files = []struct {
- Name, Body string
- }{
- {"readme.txt", "This archive contains some text files."},
- {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
- {"todo.txt", "Get animal handling licence."},
- }
- for _, file := range files {
- hdr := &tar.Header{
- Name: file.Name,
- Size: int64(len(file.Body)),
- }
- if err := tw.WriteHeader(hdr); err != nil {
- log.Fatalln(err)
- }
- if _, err := tw.Write([]byte(file.Body)); err != nil {
- log.Fatalln(err)
- }
- }
- // Make sure to check the error on Close.
- if err := tw.Close(); err != nil {
- log.Fatalln(err)
- }
-
- // Open the tar archive for reading.
- r := bytes.NewReader(buf.Bytes())
- tr := tar.NewReader(r)
-
- // Iterate through the files in the archive.
- for {
- hdr, err := tr.Next()
- if err == io.EOF {
- // end of tar archive
- break
- }
- if err != nil {
- log.Fatalln(err)
- }
- fmt.Printf("Contents of %s:\n", hdr.Name)
- if _, err := io.Copy(os.Stdout, tr); err != nil {
- log.Fatalln(err)
- }
- fmt.Println()
- }
-
- // Output:
- // Contents of readme.txt:
- // This archive contains some text files.
- // Contents of gopher.txt:
- // Gopher names:
- // George
- // Geoffrey
- // Gonzo
- // Contents of todo.txt:
- // Get animal handling licence.
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
deleted file mode 100644
index a27559d0f0..0000000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
+++ /dev/null
@@ -1,820 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-// TODO(dsymonds):
-// - pax extensions
-
-import (
- "bytes"
- "errors"
- "io"
- "io/ioutil"
- "os"
- "strconv"
- "strings"
- "time"
-)
-
-var (
- ErrHeader = errors.New("archive/tar: invalid tar header")
-)
-
-const maxNanoSecondIntSize = 9
-
-// A Reader provides sequential access to the contents of a tar archive.
-// A tar archive consists of a sequence of files.
-// The Next method advances to the next file in the archive (including the first),
-// and then it can be treated as an io.Reader to access the file's data.
-type Reader struct {
- r io.Reader
- err error
- pad int64 // amount of padding (ignored) after current file entry
- curr numBytesReader // reader for current file entry
- hdrBuff [blockSize]byte // buffer to use in readHeader
-}
-
-// A numBytesReader is an io.Reader with a numBytes method, returning the number
-// of bytes remaining in the underlying encoded data.
-type numBytesReader interface {
- io.Reader
- numBytes() int64
-}
-
-// A regFileReader is a numBytesReader for reading file data from a tar archive.
-type regFileReader struct {
- r io.Reader // underlying reader
- nb int64 // number of unread bytes for current file entry
-}
-
-// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive.
-type sparseFileReader struct {
- rfr *regFileReader // reads the sparse-encoded file data
- sp []sparseEntry // the sparse map for the file
- pos int64 // keeps track of file position
- tot int64 // total size of the file
-}
-
-// Keywords for GNU sparse files in a PAX extended header
-const (
- paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
- paxGNUSparseOffset = "GNU.sparse.offset"
- paxGNUSparseNumBytes = "GNU.sparse.numbytes"
- paxGNUSparseMap = "GNU.sparse.map"
- paxGNUSparseName = "GNU.sparse.name"
- paxGNUSparseMajor = "GNU.sparse.major"
- paxGNUSparseMinor = "GNU.sparse.minor"
- paxGNUSparseSize = "GNU.sparse.size"
- paxGNUSparseRealSize = "GNU.sparse.realsize"
-)
-
-// Keywords for old GNU sparse headers
-const (
- oldGNUSparseMainHeaderOffset = 386
- oldGNUSparseMainHeaderIsExtendedOffset = 482
- oldGNUSparseMainHeaderNumEntries = 4
- oldGNUSparseExtendedHeaderIsExtendedOffset = 504
- oldGNUSparseExtendedHeaderNumEntries = 21
- oldGNUSparseOffsetSize = 12
- oldGNUSparseNumBytesSize = 12
-)
-
-// NewReader creates a new Reader reading from r.
-func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
-
-// Next advances to the next entry in the tar archive.
-func (tr *Reader) Next() (*Header, error) {
- var hdr *Header
- if tr.err == nil {
- tr.skipUnread()
- }
- if tr.err != nil {
- return hdr, tr.err
- }
- hdr = tr.readHeader()
- if hdr == nil {
- return hdr, tr.err
- }
- // Check for PAX/GNU header.
- switch hdr.Typeflag {
- case TypeXHeader:
- // PAX extended header
- headers, err := parsePAX(tr)
- if err != nil {
- return nil, err
- }
- // We actually read the whole file,
- // but this skips alignment padding
- tr.skipUnread()
- hdr = tr.readHeader()
- mergePAX(hdr, headers)
-
- // Check for a PAX format sparse file
- sp, err := tr.checkForGNUSparsePAXHeaders(hdr, headers)
- if err != nil {
- tr.err = err
- return nil, err
- }
- if sp != nil {
- // Current file is a PAX format GNU sparse file.
- // Set the current file reader to a sparse file reader.
- tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
- }
- return hdr, nil
- case TypeGNULongName:
- // We have a GNU long name header. Its contents are the real file name.
- realname, err := ioutil.ReadAll(tr)
- if err != nil {
- return nil, err
- }
- hdr, err := tr.Next()
- hdr.Name = cString(realname)
- return hdr, err
- case TypeGNULongLink:
- // We have a GNU long link header.
- realname, err := ioutil.ReadAll(tr)
- if err != nil {
- return nil, err
- }
- hdr, err := tr.Next()
- hdr.Linkname = cString(realname)
- return hdr, err
- }
- return hdr, tr.err
-}
-
-// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
-// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
-// be treated as a regular file.
-func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
- var sparseFormat string
-
- // Check for sparse format indicators
- major, majorOk := headers[paxGNUSparseMajor]
- minor, minorOk := headers[paxGNUSparseMinor]
- sparseName, sparseNameOk := headers[paxGNUSparseName]
- _, sparseMapOk := headers[paxGNUSparseMap]
- sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
- sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
-
- // Identify which, if any, sparse format applies from which PAX headers are set
- if majorOk && minorOk {
- sparseFormat = major + "." + minor
- } else if sparseNameOk && sparseMapOk {
- sparseFormat = "0.1"
- } else if sparseSizeOk {
- sparseFormat = "0.0"
- } else {
- // Not a PAX format GNU sparse file.
- return nil, nil
- }
-
- // Check for unknown sparse format
- if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
- return nil, nil
- }
-
- // Update hdr from GNU sparse PAX headers
- if sparseNameOk {
- hdr.Name = sparseName
- }
- if sparseSizeOk {
- realSize, err := strconv.ParseInt(sparseSize, 10, 0)
- if err != nil {
- return nil, ErrHeader
- }
- hdr.Size = realSize
- } else if sparseRealSizeOk {
- realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
- if err != nil {
- return nil, ErrHeader
- }
- hdr.Size = realSize
- }
-
- // Set up the sparse map, according to the particular sparse format in use
- var sp []sparseEntry
- var err error
- switch sparseFormat {
- case "0.0", "0.1":
- sp, err = readGNUSparseMap0x1(headers)
- case "1.0":
- sp, err = readGNUSparseMap1x0(tr.curr)
- }
- return sp, err
-}
-
-// mergePAX merges well known headers according to PAX standard.
-// In general headers with the same name as those found
-// in the header struct overwrite those found in the header
-// struct with higher precision or longer values. Esp. useful
-// for name and linkname fields.
-func mergePAX(hdr *Header, headers map[string]string) error {
- for k, v := range headers {
- switch k {
- case paxPath:
- hdr.Name = v
- case paxLinkpath:
- hdr.Linkname = v
- case paxGname:
- hdr.Gname = v
- case paxUname:
- hdr.Uname = v
- case paxUid:
- uid, err := strconv.ParseInt(v, 10, 0)
- if err != nil {
- return err
- }
- hdr.Uid = int(uid)
- case paxGid:
- gid, err := strconv.ParseInt(v, 10, 0)
- if err != nil {
- return err
- }
- hdr.Gid = int(gid)
- case paxAtime:
- t, err := parsePAXTime(v)
- if err != nil {
- return err
- }
- hdr.AccessTime = t
- case paxMtime:
- t, err := parsePAXTime(v)
- if err != nil {
- return err
- }
- hdr.ModTime = t
- case paxCtime:
- t, err := parsePAXTime(v)
- if err != nil {
- return err
- }
- hdr.ChangeTime = t
- case paxSize:
- size, err := strconv.ParseInt(v, 10, 0)
- if err != nil {
- return err
- }
- hdr.Size = int64(size)
- default:
- if strings.HasPrefix(k, paxXattr) {
- if hdr.Xattrs == nil {
- hdr.Xattrs = make(map[string]string)
- }
- hdr.Xattrs[k[len(paxXattr):]] = v
- }
- }
- }
- return nil
-}
-
-// parsePAXTime takes a string of the form %d.%d as described in
-// the PAX specification.
-func parsePAXTime(t string) (time.Time, error) {
- buf := []byte(t)
- pos := bytes.IndexByte(buf, '.')
- var seconds, nanoseconds int64
- var err error
- if pos == -1 {
- seconds, err = strconv.ParseInt(t, 10, 0)
- if err != nil {
- return time.Time{}, err
- }
- } else {
- seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
- if err != nil {
- return time.Time{}, err
- }
- nano_buf := string(buf[pos+1:])
- // Pad as needed before converting to a decimal.
- // For example .030 -> .030000000 -> 30000000 nanoseconds
- if len(nano_buf) < maxNanoSecondIntSize {
- // Right pad
- nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
- } else if len(nano_buf) > maxNanoSecondIntSize {
- // Right truncate
- nano_buf = nano_buf[:maxNanoSecondIntSize]
- }
- nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
- if err != nil {
- return time.Time{}, err
- }
- }
- ts := time.Unix(seconds, nanoseconds)
- return ts, nil
-}
-
-// parsePAX parses PAX headers.
-// If an extended header (type 'x') is invalid, ErrHeader is returned
-func parsePAX(r io.Reader) (map[string]string, error) {
- buf, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, err
- }
-
- // For GNU PAX sparse format 0.0 support.
- // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
- var sparseMap bytes.Buffer
-
- headers := make(map[string]string)
- // Each record is constructed as
- // "%d %s=%s\n", length, keyword, value
- for len(buf) > 0 {
- // or the header was empty to start with.
- var sp int
- // The size field ends at the first space.
- sp = bytes.IndexByte(buf, ' ')
- if sp == -1 {
- return nil, ErrHeader
- }
- // Parse the first token as a decimal integer.
- n, err := strconv.ParseInt(string(buf[:sp]), 10, 0)
- if err != nil {
- return nil, ErrHeader
- }
- // Extract everything between the decimal and the n -1 on the
- // beginning to eat the ' ', -1 on the end to skip the newline.
- var record []byte
- record, buf = buf[sp+1:n-1], buf[n:]
- // The first equals is guaranteed to mark the end of the key.
- // Everything else is value.
- eq := bytes.IndexByte(record, '=')
- if eq == -1 {
- return nil, ErrHeader
- }
- key, value := record[:eq], record[eq+1:]
-
- keyStr := string(key)
- if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
- // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
- sparseMap.Write(value)
- sparseMap.Write([]byte{','})
- } else {
- // Normal key. Set the value in the headers map.
- headers[keyStr] = string(value)
- }
- }
- if sparseMap.Len() != 0 {
- // Add sparse info to headers, chopping off the extra comma
- sparseMap.Truncate(sparseMap.Len() - 1)
- headers[paxGNUSparseMap] = sparseMap.String()
- }
- return headers, nil
-}
-
-// cString parses bytes as a NUL-terminated C-style string.
-// If a NUL byte is not found then the whole slice is returned as a string.
-func cString(b []byte) string {
- n := 0
- for n < len(b) && b[n] != 0 {
- n++
- }
- return string(b[0:n])
-}
-
-func (tr *Reader) octal(b []byte) int64 {
- // Check for binary format first.
- if len(b) > 0 && b[0]&0x80 != 0 {
- var x int64
- for i, c := range b {
- if i == 0 {
- c &= 0x7f // ignore signal bit in first byte
- }
- x = x<<8 | int64(c)
- }
- return x
- }
-
- // Because unused fields are filled with NULs, we need
- // to skip leading NULs. Fields may also be padded with
- // spaces or NULs.
- // So we remove leading and trailing NULs and spaces to
- // be sure.
- b = bytes.Trim(b, " \x00")
-
- if len(b) == 0 {
- return 0
- }
- x, err := strconv.ParseUint(cString(b), 8, 64)
- if err != nil {
- tr.err = err
- }
- return int64(x)
-}
-
-// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding.
-func (tr *Reader) skipUnread() {
- nr := tr.numBytes() + tr.pad // number of bytes to skip
- tr.curr, tr.pad = nil, 0
- if sr, ok := tr.r.(io.Seeker); ok {
- if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {
- return
- }
- }
- _, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)
-}
-
-func (tr *Reader) verifyChecksum(header []byte) bool {
- if tr.err != nil {
- return false
- }
-
- given := tr.octal(header[148:156])
- unsigned, signed := checksum(header)
- return given == unsigned || given == signed
-}
-
-func (tr *Reader) readHeader() *Header {
- header := tr.hdrBuff[:]
- copy(header, zeroBlock)
-
- if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
- return nil
- }
-
- // Two blocks of zero bytes marks the end of the archive.
- if bytes.Equal(header, zeroBlock[0:blockSize]) {
- if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
- return nil
- }
- if bytes.Equal(header, zeroBlock[0:blockSize]) {
- tr.err = io.EOF
- } else {
- tr.err = ErrHeader // zero block and then non-zero block
- }
- return nil
- }
-
- if !tr.verifyChecksum(header) {
- tr.err = ErrHeader
- return nil
- }
-
- // Unpack
- hdr := new(Header)
- s := slicer(header)
-
- hdr.Name = cString(s.next(100))
- hdr.Mode = tr.octal(s.next(8))
- hdr.Uid = int(tr.octal(s.next(8)))
- hdr.Gid = int(tr.octal(s.next(8)))
- hdr.Size = tr.octal(s.next(12))
- hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0)
- s.next(8) // chksum
- hdr.Typeflag = s.next(1)[0]
- hdr.Linkname = cString(s.next(100))
-
- // The remainder of the header depends on the value of magic.
- // The original (v7) version of tar had no explicit magic field,
- // so its magic bytes, like the rest of the block, are NULs.
- magic := string(s.next(8)) // contains version field as well.
- var format string
- switch {
- case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
- if string(header[508:512]) == "tar\x00" {
- format = "star"
- } else {
- format = "posix"
- }
- case magic == "ustar \x00": // old GNU tar
- format = "gnu"
- }
-
- switch format {
- case "posix", "gnu", "star":
- hdr.Uname = cString(s.next(32))
- hdr.Gname = cString(s.next(32))
- devmajor := s.next(8)
- devminor := s.next(8)
- if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
- hdr.Devmajor = tr.octal(devmajor)
- hdr.Devminor = tr.octal(devminor)
- }
- var prefix string
- switch format {
- case "posix", "gnu":
- prefix = cString(s.next(155))
- case "star":
- prefix = cString(s.next(131))
- hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0)
- hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0)
- }
- if len(prefix) > 0 {
- hdr.Name = prefix + "/" + hdr.Name
- }
- }
-
- if tr.err != nil {
- tr.err = ErrHeader
- return nil
- }
-
- // Maximum value of hdr.Size is 64 GB (12 octal digits),
- // so there's no risk of int64 overflowing.
- nb := int64(hdr.Size)
- tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
-
- // Set the current file reader.
- tr.curr = ®FileReader{r: tr.r, nb: nb}
-
- // Check for old GNU sparse format entry.
- if hdr.Typeflag == TypeGNUSparse {
- // Get the real size of the file.
- hdr.Size = tr.octal(header[483:495])
-
- // Read the sparse map.
- sp := tr.readOldGNUSparseMap(header)
- if tr.err != nil {
- return nil
- }
- // Current file is a GNU sparse file. Update the current file reader.
- tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
- }
-
- return hdr
-}
-
-// A sparseEntry holds a single entry in a sparse file's sparse map.
-// A sparse entry indicates the offset and size in a sparse file of a
-// block of data.
-type sparseEntry struct {
- offset int64
- numBytes int64
-}
-
-// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
-// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
-// then one or more extension headers are used to store the rest of the sparse map.
-func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
- isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
- spCap := oldGNUSparseMainHeaderNumEntries
- if isExtended {
- spCap += oldGNUSparseExtendedHeaderNumEntries
- }
- sp := make([]sparseEntry, 0, spCap)
- s := slicer(header[oldGNUSparseMainHeaderOffset:])
-
- // Read the four entries from the main tar header
- for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
- offset := tr.octal(s.next(oldGNUSparseOffsetSize))
- numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
- if tr.err != nil {
- tr.err = ErrHeader
- return nil
- }
- if offset == 0 && numBytes == 0 {
- break
- }
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
- }
-
- for isExtended {
- // There are more entries. Read an extension header and parse its entries.
- sparseHeader := make([]byte, blockSize)
- if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
- return nil
- }
- isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
- s = slicer(sparseHeader)
- for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
- offset := tr.octal(s.next(oldGNUSparseOffsetSize))
- numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
- if tr.err != nil {
- tr.err = ErrHeader
- return nil
- }
- if offset == 0 && numBytes == 0 {
- break
- }
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
- }
- }
- return sp
-}
-
-// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format version 1.0.
-// The sparse map is stored just before the file data and padded out to the nearest block boundary.
-func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
- buf := make([]byte, 2*blockSize)
- sparseHeader := buf[:blockSize]
-
- // readDecimal is a helper function to read a decimal integer from the sparse map
- // while making sure to read from the file in blocks of size blockSize
- readDecimal := func() (int64, error) {
- // Look for newline
- nl := bytes.IndexByte(sparseHeader, '\n')
- if nl == -1 {
- if len(sparseHeader) >= blockSize {
- // This is an error
- return 0, ErrHeader
- }
- oldLen := len(sparseHeader)
- newLen := oldLen + blockSize
- if cap(sparseHeader) < newLen {
- // There's more header, but we need to make room for the next block
- copy(buf, sparseHeader)
- sparseHeader = buf[:newLen]
- } else {
- // There's more header, and we can just reslice
- sparseHeader = sparseHeader[:newLen]
- }
-
- // Now that sparseHeader is large enough, read next block
- if _, err := io.ReadFull(r, sparseHeader[oldLen:newLen]); err != nil {
- return 0, err
- }
-
- // Look for a newline in the new data
- nl = bytes.IndexByte(sparseHeader[oldLen:newLen], '\n')
- if nl == -1 {
- // This is an error
- return 0, ErrHeader
- }
- nl += oldLen // We want the position from the beginning
- }
- // Now that we've found a newline, read a number
- n, err := strconv.ParseInt(string(sparseHeader[:nl]), 10, 0)
- if err != nil {
- return 0, ErrHeader
- }
-
- // Update sparseHeader to consume this number
- sparseHeader = sparseHeader[nl+1:]
- return n, nil
- }
-
- // Read the first block
- if _, err := io.ReadFull(r, sparseHeader); err != nil {
- return nil, err
- }
-
- // The first line contains the number of entries
- numEntries, err := readDecimal()
- if err != nil {
- return nil, err
- }
-
- // Read all the entries
- sp := make([]sparseEntry, 0, numEntries)
- for i := int64(0); i < numEntries; i++ {
- // Read the offset
- offset, err := readDecimal()
- if err != nil {
- return nil, err
- }
- // Read numBytes
- numBytes, err := readDecimal()
- if err != nil {
- return nil, err
- }
-
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
- }
-
- return sp, nil
-}
-
-// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1.
-// The sparse map is stored in the PAX headers.
-func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) {
- // Get number of entries
- numEntriesStr, ok := headers[paxGNUSparseNumBlocks]
- if !ok {
- return nil, ErrHeader
- }
- numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0)
- if err != nil {
- return nil, ErrHeader
- }
-
- sparseMap := strings.Split(headers[paxGNUSparseMap], ",")
-
- // There should be two numbers in sparseMap for each entry
- if int64(len(sparseMap)) != 2*numEntries {
- return nil, ErrHeader
- }
-
- // Loop through the entries in the sparse map
- sp := make([]sparseEntry, 0, numEntries)
- for i := int64(0); i < numEntries; i++ {
- offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0)
- if err != nil {
- return nil, ErrHeader
- }
- numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0)
- if err != nil {
- return nil, ErrHeader
- }
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
- }
-
- return sp, nil
-}
-
-// numBytes returns the number of bytes left to read in the current file's entry
-// in the tar archive, or 0 if there is no current file.
-func (tr *Reader) numBytes() int64 {
- if tr.curr == nil {
- // No current file, so no bytes
- return 0
- }
- return tr.curr.numBytes()
-}
-
-// Read reads from the current entry in the tar archive.
-// It returns 0, io.EOF when it reaches the end of that entry,
-// until Next is called to advance to the next entry.
-func (tr *Reader) Read(b []byte) (n int, err error) {
- if tr.curr == nil {
- return 0, io.EOF
- }
- n, err = tr.curr.Read(b)
- if err != nil && err != io.EOF {
- tr.err = err
- }
- return
-}
-
-func (rfr *regFileReader) Read(b []byte) (n int, err error) {
- if rfr.nb == 0 {
- // file consumed
- return 0, io.EOF
- }
- if int64(len(b)) > rfr.nb {
- b = b[0:rfr.nb]
- }
- n, err = rfr.r.Read(b)
- rfr.nb -= int64(n)
-
- if err == io.EOF && rfr.nb > 0 {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// numBytes returns the number of bytes left to read in the file's data in the tar archive.
-func (rfr *regFileReader) numBytes() int64 {
- return rfr.nb
-}
-
-// readHole reads a sparse file hole ending at offset toOffset
-func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int {
- n64 := toOffset - sfr.pos
- if n64 > int64(len(b)) {
- n64 = int64(len(b))
- }
- n := int(n64)
- for i := 0; i < n; i++ {
- b[i] = 0
- }
- sfr.pos += n64
- return n
-}
-
-// Read reads the sparse file data in expanded form.
-func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
- if len(sfr.sp) == 0 {
- // No more data fragments to read from.
- if sfr.pos < sfr.tot {
- // We're in the last hole
- n = sfr.readHole(b, sfr.tot)
- return
- }
- // Otherwise, we're at the end of the file
- return 0, io.EOF
- }
- if sfr.pos < sfr.sp[0].offset {
- // We're in a hole
- n = sfr.readHole(b, sfr.sp[0].offset)
- return
- }
-
- // We're not in a hole, so we'll read from the next data fragment
- posInFragment := sfr.pos - sfr.sp[0].offset
- bytesLeft := sfr.sp[0].numBytes - posInFragment
- if int64(len(b)) > bytesLeft {
- b = b[0:bytesLeft]
- }
-
- n, err = sfr.rfr.Read(b)
- sfr.pos += int64(n)
-
- if int64(n) == bytesLeft {
- // We're done with this fragment
- sfr.sp = sfr.sp[1:]
- }
-
- if err == io.EOF && sfr.pos < sfr.tot {
- // We reached the end of the last fragment's data, but there's a final hole
- err = nil
- }
- return
-}
-
-// numBytes returns the number of bytes left to read in the sparse file's
-// sparse-encoded data in the tar archive.
-func (sfr *sparseFileReader) numBytes() int64 {
- return sfr.rfr.nb
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go
deleted file mode 100644
index 9601ffe459..0000000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go
+++ /dev/null
@@ -1,743 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-import (
- "bytes"
- "crypto/md5"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "reflect"
- "strings"
- "testing"
- "time"
-)
-
-type untarTest struct {
- file string
- headers []*Header
- cksums []string
-}
-
-var gnuTarTest = &untarTest{
- file: "testdata/gnu.tar",
- headers: []*Header{
- {
- Name: "small.txt",
- Mode: 0640,
- Uid: 73025,
- Gid: 5000,
- Size: 5,
- ModTime: time.Unix(1244428340, 0),
- Typeflag: '0',
- Uname: "dsymonds",
- Gname: "eng",
- },
- {
- Name: "small2.txt",
- Mode: 0640,
- Uid: 73025,
- Gid: 5000,
- Size: 11,
- ModTime: time.Unix(1244436044, 0),
- Typeflag: '0',
- Uname: "dsymonds",
- Gname: "eng",
- },
- },
- cksums: []string{
- "e38b27eaccb4391bdec553a7f3ae6b2f",
- "c65bd2e50a56a2138bf1716f2fd56fe9",
- },
-}
-
-var sparseTarTest = &untarTest{
- file: "testdata/sparse-formats.tar",
- headers: []*Header{
- {
- Name: "sparse-gnu",
- Mode: 420,
- Uid: 1000,
- Gid: 1000,
- Size: 200,
- ModTime: time.Unix(1392395740, 0),
- Typeflag: 0x53,
- Linkname: "",
- Uname: "david",
- Gname: "david",
- Devmajor: 0,
- Devminor: 0,
- },
- {
- Name: "sparse-posix-0.0",
- Mode: 420,
- Uid: 1000,
- Gid: 1000,
- Size: 200,
- ModTime: time.Unix(1392342187, 0),
- Typeflag: 0x30,
- Linkname: "",
- Uname: "david",
- Gname: "david",
- Devmajor: 0,
- Devminor: 0,
- },
- {
- Name: "sparse-posix-0.1",
- Mode: 420,
- Uid: 1000,
- Gid: 1000,
- Size: 200,
- ModTime: time.Unix(1392340456, 0),
- Typeflag: 0x30,
- Linkname: "",
- Uname: "david",
- Gname: "david",
- Devmajor: 0,
- Devminor: 0,
- },
- {
- Name: "sparse-posix-1.0",
- Mode: 420,
- Uid: 1000,
- Gid: 1000,
- Size: 200,
- ModTime: time.Unix(1392337404, 0),
- Typeflag: 0x30,
- Linkname: "",
- Uname: "david",
- Gname: "david",
- Devmajor: 0,
- Devminor: 0,
- },
- {
- Name: "end",
- Mode: 420,
- Uid: 1000,
- Gid: 1000,
- Size: 4,
- ModTime: time.Unix(1392398319, 0),
- Typeflag: 0x30,
- Linkname: "",
- Uname: "david",
- Gname: "david",
- Devmajor: 0,
- Devminor: 0,
- },
- },
- cksums: []string{
- "6f53234398c2449fe67c1812d993012f",
- "6f53234398c2449fe67c1812d993012f",
- "6f53234398c2449fe67c1812d993012f",
- "6f53234398c2449fe67c1812d993012f",
- "b0061974914468de549a2af8ced10316",
- },
-}
-
-var untarTests = []*untarTest{
- gnuTarTest,
- sparseTarTest,
- {
- file: "testdata/star.tar",
- headers: []*Header{
- {
- Name: "small.txt",
- Mode: 0640,
- Uid: 73025,
- Gid: 5000,
- Size: 5,
- ModTime: time.Unix(1244592783, 0),
- Typeflag: '0',
- Uname: "dsymonds",
- Gname: "eng",
- AccessTime: time.Unix(1244592783, 0),
- ChangeTime: time.Unix(1244592783, 0),
- },
- {
- Name: "small2.txt",
- Mode: 0640,
- Uid: 73025,
- Gid: 5000,
- Size: 11,
- ModTime: time.Unix(1244592783, 0),
- Typeflag: '0',
- Uname: "dsymonds",
- Gname: "eng",
- AccessTime: time.Unix(1244592783, 0),
- ChangeTime: time.Unix(1244592783, 0),
- },
- },
- },
- {
- file: "testdata/v7.tar",
- headers: []*Header{
- {
- Name: "small.txt",
- Mode: 0444,
- Uid: 73025,
- Gid: 5000,
- Size: 5,
- ModTime: time.Unix(1244593104, 0),
- Typeflag: '\x00',
- },
- {
- Name: "small2.txt",
- Mode: 0444,
- Uid: 73025,
- Gid: 5000,
- Size: 11,
- ModTime: time.Unix(1244593104, 0),
- Typeflag: '\x00',
- },
- },
- },
- {
- file: "testdata/pax.tar",
- headers: []*Header{
- {
- Name: "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
- Mode: 0664,
- Uid: 1000,
- Gid: 1000,
- Uname: "shane",
- Gname: "shane",
- Size: 7,
- ModTime: time.Unix(1350244992, 23960108),
- ChangeTime: time.Unix(1350244992, 23960108),
- AccessTime: time.Unix(1350244992, 23960108),
- Typeflag: TypeReg,
- },
- {
- Name: "a/b",
- Mode: 0777,
- Uid: 1000,
- Gid: 1000,
- Uname: "shane",
- Gname: "shane",
- Size: 0,
- ModTime: time.Unix(1350266320, 910238425),
- ChangeTime: time.Unix(1350266320, 910238425),
- AccessTime: time.Unix(1350266320, 910238425),
- Typeflag: TypeSymlink,
- Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
- },
- },
- },
- {
- file: "testdata/nil-uid.tar", // golang.org/issue/5290
- headers: []*Header{
- {
- Name: "P1050238.JPG.log",
- Mode: 0664,
- Uid: 0,
- Gid: 0,
- Size: 14,
- ModTime: time.Unix(1365454838, 0),
- Typeflag: TypeReg,
- Linkname: "",
- Uname: "eyefi",
- Gname: "eyefi",
- Devmajor: 0,
- Devminor: 0,
- },
- },
- },
- {
- file: "testdata/xattrs.tar",
- headers: []*Header{
- {
- Name: "small.txt",
- Mode: 0644,
- Uid: 1000,
- Gid: 10,
- Size: 5,
- ModTime: time.Unix(1386065770, 448252320),
- Typeflag: '0',
- Uname: "alex",
- Gname: "wheel",
- AccessTime: time.Unix(1389782991, 419875220),
- ChangeTime: time.Unix(1389782956, 794414986),
- Xattrs: map[string]string{
- "user.key": "value",
- "user.key2": "value2",
- // Interestingly, selinux encodes the terminating null inside the xattr
- "security.selinux": "unconfined_u:object_r:default_t:s0\x00",
- },
- },
- {
- Name: "small2.txt",
- Mode: 0644,
- Uid: 1000,
- Gid: 10,
- Size: 11,
- ModTime: time.Unix(1386065770, 449252304),
- Typeflag: '0',
- Uname: "alex",
- Gname: "wheel",
- AccessTime: time.Unix(1389782991, 419875220),
- ChangeTime: time.Unix(1386065770, 449252304),
- Xattrs: map[string]string{
- "security.selinux": "unconfined_u:object_r:default_t:s0\x00",
- },
- },
- },
- },
-}
-
-func TestReader(t *testing.T) {
-testLoop:
- for i, test := range untarTests {
- f, err := os.Open(test.file)
- if err != nil {
- t.Errorf("test %d: Unexpected error: %v", i, err)
- continue
- }
- defer f.Close()
- tr := NewReader(f)
- for j, header := range test.headers {
- hdr, err := tr.Next()
- if err != nil || hdr == nil {
- t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
- f.Close()
- continue testLoop
- }
- if !reflect.DeepEqual(*hdr, *header) {
- t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
- i, j, *hdr, *header)
- }
- }
- hdr, err := tr.Next()
- if err == io.EOF {
- continue testLoop
- }
- if hdr != nil || err != nil {
- t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err)
- }
- }
-}
-
-func TestPartialRead(t *testing.T) {
- f, err := os.Open("testdata/gnu.tar")
- if err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
- defer f.Close()
-
- tr := NewReader(f)
-
- // Read the first four bytes; Next() should skip the last byte.
- hdr, err := tr.Next()
- if err != nil || hdr == nil {
- t.Fatalf("Didn't get first file: %v", err)
- }
- buf := make([]byte, 4)
- if _, err := io.ReadFull(tr, buf); err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
- if expected := []byte("Kilt"); !bytes.Equal(buf, expected) {
- t.Errorf("Contents = %v, want %v", buf, expected)
- }
-
- // Second file
- hdr, err = tr.Next()
- if err != nil || hdr == nil {
- t.Fatalf("Didn't get second file: %v", err)
- }
- buf = make([]byte, 6)
- if _, err := io.ReadFull(tr, buf); err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
- if expected := []byte("Google"); !bytes.Equal(buf, expected) {
- t.Errorf("Contents = %v, want %v", buf, expected)
- }
-}
-
-func TestIncrementalRead(t *testing.T) {
- test := gnuTarTest
- f, err := os.Open(test.file)
- if err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
- defer f.Close()
-
- tr := NewReader(f)
-
- headers := test.headers
- cksums := test.cksums
- nread := 0
-
- // loop over all files
- for ; ; nread++ {
- hdr, err := tr.Next()
- if hdr == nil || err == io.EOF {
- break
- }
-
- // check the header
- if !reflect.DeepEqual(*hdr, *headers[nread]) {
- t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
- *hdr, headers[nread])
- }
-
- // read file contents in little chunks EOF,
- // checksumming all the way
- h := md5.New()
- rdbuf := make([]uint8, 8)
- for {
- nr, err := tr.Read(rdbuf)
- if err == io.EOF {
- break
- }
- if err != nil {
- t.Errorf("Read: unexpected error %v\n", err)
- break
- }
- h.Write(rdbuf[0:nr])
- }
- // verify checksum
- have := fmt.Sprintf("%x", h.Sum(nil))
- want := cksums[nread]
- if want != have {
- t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
- }
- }
- if nread != len(headers) {
- t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
- }
-}
-
-func TestNonSeekable(t *testing.T) {
- test := gnuTarTest
- f, err := os.Open(test.file)
- if err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
- defer f.Close()
-
- type readerOnly struct {
- io.Reader
- }
- tr := NewReader(readerOnly{f})
- nread := 0
-
- for ; ; nread++ {
- _, err := tr.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
- }
-
- if nread != len(test.headers) {
- t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(test.headers), nread)
- }
-}
-
-func TestParsePAXHeader(t *testing.T) {
- paxTests := [][3]string{
- {"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths
- {"a", "a=name", "9 a=name\n"}, // Test case involving multiple acceptable length
- {"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}}
- for _, test := range paxTests {
- key, expected, raw := test[0], test[1], test[2]
- reader := bytes.NewReader([]byte(raw))
- headers, err := parsePAX(reader)
- if err != nil {
- t.Errorf("Couldn't parse correctly formatted headers: %v", err)
- continue
- }
- if strings.EqualFold(headers[key], expected) {
- t.Errorf("mtime header incorrectly parsed: got %s, wanted %s", headers[key], expected)
- continue
- }
- trailer := make([]byte, 100)
- n, err := reader.Read(trailer)
- if err != io.EOF || n != 0 {
- t.Error("Buffer wasn't consumed")
- }
- }
- badHeader := bytes.NewReader([]byte("3 somelongkey="))
- if _, err := parsePAX(badHeader); err != ErrHeader {
- t.Fatal("Unexpected success when parsing bad header")
- }
-}
-
-func TestParsePAXTime(t *testing.T) {
- // Some valid PAX time values
- timestamps := map[string]time.Time{
- "1350244992.023960108": time.Unix(1350244992, 23960108), // The common case
- "1350244992.02396010": time.Unix(1350244992, 23960100), // Lower precision value
- "1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value
- "1350244992": time.Unix(1350244992, 0), // Low precision value
- }
- for input, expected := range timestamps {
- ts, err := parsePAXTime(input)
- if err != nil {
- t.Fatal(err)
- }
- if !ts.Equal(expected) {
- t.Fatalf("Time parsing failure %s %s", ts, expected)
- }
- }
-}
-
-func TestMergePAX(t *testing.T) {
- hdr := new(Header)
- // Test a string, integer, and time based value.
- headers := map[string]string{
- "path": "a/b/c",
- "uid": "1000",
- "mtime": "1350244992.023960108",
- }
- err := mergePAX(hdr, headers)
- if err != nil {
- t.Fatal(err)
- }
- want := &Header{
- Name: "a/b/c",
- Uid: 1000,
- ModTime: time.Unix(1350244992, 23960108),
- }
- if !reflect.DeepEqual(hdr, want) {
- t.Errorf("incorrect merge: got %+v, want %+v", hdr, want)
- }
-}
-
-func TestSparseEndToEnd(t *testing.T) {
- test := sparseTarTest
- f, err := os.Open(test.file)
- if err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
- defer f.Close()
-
- tr := NewReader(f)
-
- headers := test.headers
- cksums := test.cksums
- nread := 0
-
- // loop over all files
- for ; ; nread++ {
- hdr, err := tr.Next()
- if hdr == nil || err == io.EOF {
- break
- }
-
- // check the header
- if !reflect.DeepEqual(*hdr, *headers[nread]) {
- t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
- *hdr, headers[nread])
- }
-
- // read and checksum the file data
- h := md5.New()
- _, err = io.Copy(h, tr)
- if err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
-
- // verify checksum
- have := fmt.Sprintf("%x", h.Sum(nil))
- want := cksums[nread]
- if want != have {
- t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
- }
- }
- if nread != len(headers) {
- t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
- }
-}
-
-type sparseFileReadTest struct {
- sparseData []byte
- sparseMap []sparseEntry
- realSize int64
- expected []byte
-}
-
-var sparseFileReadTests = []sparseFileReadTest{
- {
- sparseData: []byte("abcde"),
- sparseMap: []sparseEntry{
- {offset: 0, numBytes: 2},
- {offset: 5, numBytes: 3},
- },
- realSize: 8,
- expected: []byte("ab\x00\x00\x00cde"),
- },
- {
- sparseData: []byte("abcde"),
- sparseMap: []sparseEntry{
- {offset: 0, numBytes: 2},
- {offset: 5, numBytes: 3},
- },
- realSize: 10,
- expected: []byte("ab\x00\x00\x00cde\x00\x00"),
- },
- {
- sparseData: []byte("abcde"),
- sparseMap: []sparseEntry{
- {offset: 1, numBytes: 3},
- {offset: 6, numBytes: 2},
- },
- realSize: 8,
- expected: []byte("\x00abc\x00\x00de"),
- },
- {
- sparseData: []byte("abcde"),
- sparseMap: []sparseEntry{
- {offset: 1, numBytes: 3},
- {offset: 6, numBytes: 2},
- },
- realSize: 10,
- expected: []byte("\x00abc\x00\x00de\x00\x00"),
- },
- {
- sparseData: []byte(""),
- sparseMap: nil,
- realSize: 2,
- expected: []byte("\x00\x00"),
- },
-}
-
-func TestSparseFileReader(t *testing.T) {
- for i, test := range sparseFileReadTests {
- r := bytes.NewReader(test.sparseData)
- nb := int64(r.Len())
- sfr := &sparseFileReader{
- rfr: ®FileReader{r: r, nb: nb},
- sp: test.sparseMap,
- pos: 0,
- tot: test.realSize,
- }
- if sfr.numBytes() != nb {
- t.Errorf("test %d: Before reading, sfr.numBytes() = %d, want %d", i, sfr.numBytes(), nb)
- }
- buf, err := ioutil.ReadAll(sfr)
- if err != nil {
- t.Errorf("test %d: Unexpected error: %v", i, err)
- }
- if e := test.expected; !bytes.Equal(buf, e) {
- t.Errorf("test %d: Contents = %v, want %v", i, buf, e)
- }
- if sfr.numBytes() != 0 {
- t.Errorf("test %d: After draining the reader, numBytes() was nonzero", i)
- }
- }
-}
-
-func TestSparseIncrementalRead(t *testing.T) {
- sparseMap := []sparseEntry{{10, 2}}
- sparseData := []byte("Go")
- expected := "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Go\x00\x00\x00\x00\x00\x00\x00\x00"
-
- r := bytes.NewReader(sparseData)
- nb := int64(r.Len())
- sfr := &sparseFileReader{
- rfr: ®FileReader{r: r, nb: nb},
- sp: sparseMap,
- pos: 0,
- tot: int64(len(expected)),
- }
-
- // We'll read the data 6 bytes at a time, with a hole of size 10 at
- // the beginning and one of size 8 at the end.
- var outputBuf bytes.Buffer
- buf := make([]byte, 6)
- for {
- n, err := sfr.Read(buf)
- if err == io.EOF {
- break
- }
- if err != nil {
- t.Errorf("Read: unexpected error %v\n", err)
- }
- if n > 0 {
- _, err := outputBuf.Write(buf[:n])
- if err != nil {
- t.Errorf("Write: unexpected error %v\n", err)
- }
- }
- }
- got := outputBuf.String()
- if got != expected {
- t.Errorf("Contents = %v, want %v", got, expected)
- }
-}
-
-func TestReadGNUSparseMap0x1(t *testing.T) {
- headers := map[string]string{
- paxGNUSparseNumBlocks: "4",
- paxGNUSparseMap: "0,5,10,5,20,5,30,5",
- }
- expected := []sparseEntry{
- {offset: 0, numBytes: 5},
- {offset: 10, numBytes: 5},
- {offset: 20, numBytes: 5},
- {offset: 30, numBytes: 5},
- }
-
- sp, err := readGNUSparseMap0x1(headers)
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- if !reflect.DeepEqual(sp, expected) {
- t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
- }
-}
-
-func TestReadGNUSparseMap1x0(t *testing.T) {
- // This test uses lots of holes so the sparse header takes up more than two blocks
- numEntries := 100
- expected := make([]sparseEntry, 0, numEntries)
- sparseMap := new(bytes.Buffer)
-
- fmt.Fprintf(sparseMap, "%d\n", numEntries)
- for i := 0; i < numEntries; i++ {
- offset := int64(2048 * i)
- numBytes := int64(1024)
- expected = append(expected, sparseEntry{offset: offset, numBytes: numBytes})
- fmt.Fprintf(sparseMap, "%d\n%d\n", offset, numBytes)
- }
-
- // Make the header the smallest multiple of blockSize that fits the sparseMap
- headerBlocks := (sparseMap.Len() + blockSize - 1) / blockSize
- bufLen := blockSize * headerBlocks
- buf := make([]byte, bufLen)
- copy(buf, sparseMap.Bytes())
-
- // Get an reader to read the sparse map
- r := bytes.NewReader(buf)
-
- // Read the sparse map
- sp, err := readGNUSparseMap1x0(r)
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- if !reflect.DeepEqual(sp, expected) {
- t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
- }
-}
-
-func TestUninitializedRead(t *testing.T) {
- test := gnuTarTest
- f, err := os.Open(test.file)
- if err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
- defer f.Close()
-
- tr := NewReader(f)
- _, err = tr.Read([]byte{})
- if err == nil || err != io.EOF {
- t.Errorf("Unexpected error: %v, wanted %v", err, io.EOF)
- }
-
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
deleted file mode 100644
index cf9cc79c59..0000000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux dragonfly openbsd solaris
-
-package tar
-
-import (
- "syscall"
- "time"
-)
-
-func statAtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Atim.Unix())
-}
-
-func statCtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Ctim.Unix())
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go
deleted file mode 100644
index 6f17dbe307..0000000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin freebsd netbsd
-
-package tar
-
-import (
- "syscall"
- "time"
-)
-
-func statAtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Atimespec.Unix())
-}
-
-func statCtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Ctimespec.Unix())
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go
deleted file mode 100644
index cb843db4cf..0000000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux darwin dragonfly freebsd openbsd netbsd solaris
-
-package tar
-
-import (
- "os"
- "syscall"
-)
-
-func init() {
- sysStat = statUnix
-}
-
-func statUnix(fi os.FileInfo, h *Header) error {
- sys, ok := fi.Sys().(*syscall.Stat_t)
- if !ok {
- return nil
- }
- h.Uid = int(sys.Uid)
- h.Gid = int(sys.Gid)
- // TODO(bradfitz): populate username & group. os/user
- // doesn't cache LookupId lookups, and lacks group
- // lookup functions.
- h.AccessTime = statAtime(sys)
- h.ChangeTime = statCtime(sys)
- // TODO(bradfitz): major/minor device numbers?
- return nil
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go
deleted file mode 100644
index ed333f3ea4..0000000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-import (
- "bytes"
- "io/ioutil"
- "os"
- "path"
- "reflect"
- "strings"
- "testing"
- "time"
-)
-
-func TestFileInfoHeader(t *testing.T) {
- fi, err := os.Stat("testdata/small.txt")
- if err != nil {
- t.Fatal(err)
- }
- h, err := FileInfoHeader(fi, "")
- if err != nil {
- t.Fatalf("FileInfoHeader: %v", err)
- }
- if g, e := h.Name, "small.txt"; g != e {
- t.Errorf("Name = %q; want %q", g, e)
- }
- if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
- t.Errorf("Mode = %#o; want %#o", g, e)
- }
- if g, e := h.Size, int64(5); g != e {
- t.Errorf("Size = %v; want %v", g, e)
- }
- if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
- t.Errorf("ModTime = %v; want %v", g, e)
- }
- // FileInfoHeader should error when passing nil FileInfo
- if _, err := FileInfoHeader(nil, ""); err == nil {
- t.Fatalf("Expected error when passing nil to FileInfoHeader")
- }
-}
-
-func TestFileInfoHeaderDir(t *testing.T) {
- fi, err := os.Stat("testdata")
- if err != nil {
- t.Fatal(err)
- }
- h, err := FileInfoHeader(fi, "")
- if err != nil {
- t.Fatalf("FileInfoHeader: %v", err)
- }
- if g, e := h.Name, "testdata/"; g != e {
- t.Errorf("Name = %q; want %q", g, e)
- }
- // Ignoring c_ISGID for golang.org/issue/4867
- if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
- t.Errorf("Mode = %#o; want %#o", g, e)
- }
- if g, e := h.Size, int64(0); g != e {
- t.Errorf("Size = %v; want %v", g, e)
- }
- if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
- t.Errorf("ModTime = %v; want %v", g, e)
- }
-}
-
-func TestFileInfoHeaderSymlink(t *testing.T) {
- h, err := FileInfoHeader(symlink{}, "some-target")
- if err != nil {
- t.Fatal(err)
- }
- if g, e := h.Name, "some-symlink"; g != e {
- t.Errorf("Name = %q; want %q", g, e)
- }
- if g, e := h.Linkname, "some-target"; g != e {
- t.Errorf("Linkname = %q; want %q", g, e)
- }
-}
-
-type symlink struct{}
-
-func (symlink) Name() string { return "some-symlink" }
-func (symlink) Size() int64 { return 0 }
-func (symlink) Mode() os.FileMode { return os.ModeSymlink }
-func (symlink) ModTime() time.Time { return time.Time{} }
-func (symlink) IsDir() bool { return false }
-func (symlink) Sys() interface{} { return nil }
-
-func TestRoundTrip(t *testing.T) {
- data := []byte("some file contents")
-
- var b bytes.Buffer
- tw := NewWriter(&b)
- hdr := &Header{
- Name: "file.txt",
- Uid: 1 << 21, // too big for 8 octal digits
- Size: int64(len(data)),
- ModTime: time.Now(),
- }
- // tar only supports second precision.
- hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
- if err := tw.WriteHeader(hdr); err != nil {
- t.Fatalf("tw.WriteHeader: %v", err)
- }
- if _, err := tw.Write(data); err != nil {
- t.Fatalf("tw.Write: %v", err)
- }
- if err := tw.Close(); err != nil {
- t.Fatalf("tw.Close: %v", err)
- }
-
- // Read it back.
- tr := NewReader(&b)
- rHdr, err := tr.Next()
- if err != nil {
- t.Fatalf("tr.Next: %v", err)
- }
- if !reflect.DeepEqual(rHdr, hdr) {
- t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
- }
- rData, err := ioutil.ReadAll(tr)
- if err != nil {
- t.Fatalf("Read: %v", err)
- }
- if !bytes.Equal(rData, data) {
- t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
- }
-}
-
-type headerRoundTripTest struct {
- h *Header
- fm os.FileMode
-}
-
-func TestHeaderRoundTrip(t *testing.T) {
- golden := []headerRoundTripTest{
- // regular file.
- {
- h: &Header{
- Name: "test.txt",
- Mode: 0644 | c_ISREG,
- Size: 12,
- ModTime: time.Unix(1360600916, 0),
- Typeflag: TypeReg,
- },
- fm: 0644,
- },
- // hard link.
- {
- h: &Header{
- Name: "hard.txt",
- Mode: 0644 | c_ISLNK,
- Size: 0,
- ModTime: time.Unix(1360600916, 0),
- Typeflag: TypeLink,
- },
- fm: 0644 | os.ModeSymlink,
- },
- // symbolic link.
- {
- h: &Header{
- Name: "link.txt",
- Mode: 0777 | c_ISLNK,
- Size: 0,
- ModTime: time.Unix(1360600852, 0),
- Typeflag: TypeSymlink,
- },
- fm: 0777 | os.ModeSymlink,
- },
- // character device node.
- {
- h: &Header{
- Name: "dev/null",
- Mode: 0666 | c_ISCHR,
- Size: 0,
- ModTime: time.Unix(1360578951, 0),
- Typeflag: TypeChar,
- },
- fm: 0666 | os.ModeDevice | os.ModeCharDevice,
- },
- // block device node.
- {
- h: &Header{
- Name: "dev/sda",
- Mode: 0660 | c_ISBLK,
- Size: 0,
- ModTime: time.Unix(1360578954, 0),
- Typeflag: TypeBlock,
- },
- fm: 0660 | os.ModeDevice,
- },
- // directory.
- {
- h: &Header{
- Name: "dir/",
- Mode: 0755 | c_ISDIR,
- Size: 0,
- ModTime: time.Unix(1360601116, 0),
- Typeflag: TypeDir,
- },
- fm: 0755 | os.ModeDir,
- },
- // fifo node.
- {
- h: &Header{
- Name: "dev/initctl",
- Mode: 0600 | c_ISFIFO,
- Size: 0,
- ModTime: time.Unix(1360578949, 0),
- Typeflag: TypeFifo,
- },
- fm: 0600 | os.ModeNamedPipe,
- },
- // setuid.
- {
- h: &Header{
- Name: "bin/su",
- Mode: 0755 | c_ISREG | c_ISUID,
- Size: 23232,
- ModTime: time.Unix(1355405093, 0),
- Typeflag: TypeReg,
- },
- fm: 0755 | os.ModeSetuid,
- },
- // setguid.
- {
- h: &Header{
- Name: "group.txt",
- Mode: 0750 | c_ISREG | c_ISGID,
- Size: 0,
- ModTime: time.Unix(1360602346, 0),
- Typeflag: TypeReg,
- },
- fm: 0750 | os.ModeSetgid,
- },
- // sticky.
- {
- h: &Header{
- Name: "sticky.txt",
- Mode: 0600 | c_ISREG | c_ISVTX,
- Size: 7,
- ModTime: time.Unix(1360602540, 0),
- Typeflag: TypeReg,
- },
- fm: 0600 | os.ModeSticky,
- },
- }
-
- for i, g := range golden {
- fi := g.h.FileInfo()
- h2, err := FileInfoHeader(fi, "")
- if err != nil {
- t.Error(err)
- continue
- }
- if strings.Contains(fi.Name(), "/") {
- t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
- }
- name := path.Base(g.h.Name)
- if fi.IsDir() {
- name += "/"
- }
- if got, want := h2.Name, name; got != want {
- t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
- }
- if got, want := h2.Size, g.h.Size; got != want {
- t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
- }
- if got, want := h2.Mode, g.h.Mode; got != want {
- t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
- }
- if got, want := fi.Mode(), g.fm; got != want {
- t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
- }
- if got, want := h2.ModTime, g.h.ModTime; got != want {
- t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
- }
- if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
- t.Errorf("i=%d: Sys didn't return original *Header", i)
- }
- }
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar
deleted file mode 100644
index fc899dc8dc..0000000000
Binary files a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar and /dev/null differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar
deleted file mode 100644
index cc9cfaa33c..0000000000
Binary files a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar and /dev/null differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar
deleted file mode 100644
index 9bc24b6587..0000000000
Binary files a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar and /dev/null differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt
deleted file mode 100644
index b249bfc518..0000000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt
+++ /dev/null
@@ -1 +0,0 @@
-Kilts
\ No newline at end of file
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt
deleted file mode 100644
index 394ee3ecd0..0000000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt
+++ /dev/null
@@ -1 +0,0 @@
-Google.com
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar
deleted file mode 100644
index 8bd4e74d50..0000000000
Binary files a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar and /dev/null differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar
deleted file mode 100644
index 59e2d4e604..0000000000
Binary files a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar and /dev/null differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar
deleted file mode 100644
index 29679d9a30..0000000000
Binary files a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar and /dev/null differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar
deleted file mode 100644
index eb65fc9410..0000000000
Binary files a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar and /dev/null differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar
deleted file mode 100644
index 5960ee8247..0000000000
Binary files a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar and /dev/null differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar
deleted file mode 100644
index 753e883ceb..0000000000
Binary files a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar and /dev/null differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar
deleted file mode 100644
index e6d816ad07..0000000000
Binary files a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar and /dev/null differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar
deleted file mode 100644
index 9701950edd..0000000000
Binary files a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar and /dev/null differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
deleted file mode 100644
index dafb2cabf3..0000000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
+++ /dev/null
@@ -1,396 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-// TODO(dsymonds):
-// - catch more errors (no first header, etc.)
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "os"
- "path"
- "strconv"
- "strings"
- "time"
-)
-
-var (
- ErrWriteTooLong = errors.New("archive/tar: write too long")
- ErrFieldTooLong = errors.New("archive/tar: header field too long")
- ErrWriteAfterClose = errors.New("archive/tar: write after close")
- errNameTooLong = errors.New("archive/tar: name too long")
- errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
-)
-
-// A Writer provides sequential writing of a tar archive in POSIX.1 format.
-// A tar archive consists of a sequence of files.
-// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
-// writing at most hdr.Size bytes in total.
-type Writer struct {
- w io.Writer
- err error
- nb int64 // number of unwritten bytes for current file entry
- pad int64 // amount of padding to write after current file entry
- closed bool
- usedBinary bool // whether the binary numeric field extension was used
- preferPax bool // use pax header instead of binary numeric header
- hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
- paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
-}
-
-// NewWriter creates a new Writer writing to w.
-func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
-
-// Flush finishes writing the current file (optional).
-func (tw *Writer) Flush() error {
- if tw.nb > 0 {
- tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
- return tw.err
- }
-
- n := tw.nb + tw.pad
- for n > 0 && tw.err == nil {
- nr := n
- if nr > blockSize {
- nr = blockSize
- }
- var nw int
- nw, tw.err = tw.w.Write(zeroBlock[0:nr])
- n -= int64(nw)
- }
- tw.nb = 0
- tw.pad = 0
- return tw.err
-}
-
-// Write s into b, terminating it with a NUL if there is room.
-// If the value is too long for the field and allowPax is true add a paxheader record instead
-func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
- needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s)
- if needsPaxHeader {
- paxHeaders[paxKeyword] = s
- return
- }
- if len(s) > len(b) {
- if tw.err == nil {
- tw.err = ErrFieldTooLong
- }
- return
- }
- ascii := toASCII(s)
- copy(b, ascii)
- if len(ascii) < len(b) {
- b[len(ascii)] = 0
- }
-}
-
-// Encode x as an octal ASCII string and write it into b with leading zeros.
-func (tw *Writer) octal(b []byte, x int64) {
- s := strconv.FormatInt(x, 8)
- // leading zeros, but leave room for a NUL.
- for len(s)+1 < len(b) {
- s = "0" + s
- }
- tw.cString(b, s, false, paxNone, nil)
-}
-
-// Write x into b, either as octal or as binary (GNUtar/star extension).
-// If the value is too long for the field and writingPax is enabled both for the field and the add a paxheader record instead
-func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
- // Try octal first.
- s := strconv.FormatInt(x, 8)
- if len(s) < len(b) {
- tw.octal(b, x)
- return
- }
-
- // If it is too long for octal, and pax is preferred, use a pax header
- if allowPax && tw.preferPax {
- tw.octal(b, 0)
- s := strconv.FormatInt(x, 10)
- paxHeaders[paxKeyword] = s
- return
- }
-
- // Too big: use binary (big-endian).
- tw.usedBinary = true
- for i := len(b) - 1; x > 0 && i >= 0; i-- {
- b[i] = byte(x)
- x >>= 8
- }
- b[0] |= 0x80 // highest bit indicates binary format
-}
-
-var (
- minTime = time.Unix(0, 0)
- // There is room for 11 octal digits (33 bits) of mtime.
- maxTime = minTime.Add((1<<33 - 1) * time.Second)
-)
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// WriteHeader calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-func (tw *Writer) WriteHeader(hdr *Header) error {
- return tw.writeHeader(hdr, true)
-}
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// WriteHeader calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-// As this method is called internally by writePax header to allow it to
-// suppress writing the pax header.
-func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
- if tw.closed {
- return ErrWriteAfterClose
- }
- if tw.err == nil {
- tw.Flush()
- }
- if tw.err != nil {
- return tw.err
- }
-
- // a map to hold pax header records, if any are needed
- paxHeaders := make(map[string]string)
-
- // TODO(shanemhansen): we might want to use PAX headers for
- // subsecond time resolution, but for now let's just capture
- // too long fields or non ascii characters
-
- var header []byte
-
- // We need to select which scratch buffer to use carefully,
- // since this method is called recursively to write PAX headers.
- // If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
- // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
- // already being used by the non-recursive call, so we must use paxHdrBuff.
- header = tw.hdrBuff[:]
- if !allowPax {
- header = tw.paxHdrBuff[:]
- }
- copy(header, zeroBlock)
- s := slicer(header)
-
- // keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
- pathHeaderBytes := s.next(fileNameSize)
-
- tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders)
-
- // Handle out of range ModTime carefully.
- var modTime int64
- if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
- modTime = hdr.ModTime.Unix()
- }
-
- tw.octal(s.next(8), hdr.Mode) // 100:108
- tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116
- tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124
- tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders) // 124:136
- tw.numeric(s.next(12), modTime, false, paxNone, nil) // 136:148 --- consider using pax for finer granularity
- s.next(8) // chksum (148:156)
- s.next(1)[0] = hdr.Typeflag // 156:157
-
- tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders)
-
- copy(s.next(8), []byte("ustar\x0000")) // 257:265
- tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297
- tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329
- tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil) // 329:337
- tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil) // 337:345
-
- // keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
- prefixHeaderBytes := s.next(155)
- tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix
-
- // Use the GNU magic instead of POSIX magic if we used any GNU extensions.
- if tw.usedBinary {
- copy(header[257:265], []byte("ustar \x00"))
- }
-
- _, paxPathUsed := paxHeaders[paxPath]
- // try to use a ustar header when only the name is too long
- if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
- suffix := hdr.Name
- prefix := ""
- if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
- var err error
- prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
- if err == nil {
- // ok we can use a ustar long name instead of pax, now correct the fields
-
- // remove the path field from the pax header. this will suppress the pax header
- delete(paxHeaders, paxPath)
-
- // update the path fields
- tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
- tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
-
- // Use the ustar magic if we used ustar long names.
- if len(prefix) > 0 && !tw.usedBinary {
- copy(header[257:265], []byte("ustar\x00"))
- }
- }
- }
- }
-
- // The chksum field is terminated by a NUL and a space.
- // This is different from the other octal fields.
- chksum, _ := checksum(header)
- tw.octal(header[148:155], chksum)
- header[155] = ' '
-
- if tw.err != nil {
- // problem with header; probably integer too big for a field.
- return tw.err
- }
-
- if allowPax {
- for k, v := range hdr.Xattrs {
- paxHeaders[paxXattr+k] = v
- }
- }
-
- if len(paxHeaders) > 0 {
- if !allowPax {
- return errInvalidHeader
- }
- if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
- return err
- }
- }
- tw.nb = int64(hdr.Size)
- tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
-
- _, tw.err = tw.w.Write(header)
- return tw.err
-}
-
-// writeUSTARLongName splits a USTAR long name hdr.Name.
-// name must be < 256 characters. errNameTooLong is returned
-// if hdr.Name can't be split. The splitting heuristic
-// is compatible with gnu tar.
-func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) {
- length := len(name)
- if length > fileNamePrefixSize+1 {
- length = fileNamePrefixSize + 1
- } else if name[length-1] == '/' {
- length--
- }
- i := strings.LastIndex(name[:length], "/")
- // nlen contains the resulting length in the name field.
- // plen contains the resulting length in the prefix field.
- nlen := len(name) - i - 1
- plen := i
- if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
- err = errNameTooLong
- return
- }
- prefix, suffix = name[:i], name[i+1:]
- return
-}
-
-// writePaxHeader writes an extended pax header to the
-// archive.
-func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
- // Prepare extended header
- ext := new(Header)
- ext.Typeflag = TypeXHeader
- // Setting ModTime is required for reader parsing to
- // succeed, and seems harmless enough.
- ext.ModTime = hdr.ModTime
- // The spec asks that we namespace our pseudo files
- // with the current pid.
- pid := os.Getpid()
- dir, file := path.Split(hdr.Name)
- fullName := path.Join(dir,
- fmt.Sprintf("PaxHeaders.%d", pid), file)
-
- ascii := toASCII(fullName)
- if len(ascii) > 100 {
- ascii = ascii[:100]
- }
- ext.Name = ascii
- // Construct the body
- var buf bytes.Buffer
-
- for k, v := range paxHeaders {
- fmt.Fprint(&buf, paxHeader(k+"="+v))
- }
-
- ext.Size = int64(len(buf.Bytes()))
- if err := tw.writeHeader(ext, false); err != nil {
- return err
- }
- if _, err := tw.Write(buf.Bytes()); err != nil {
- return err
- }
- if err := tw.Flush(); err != nil {
- return err
- }
- return nil
-}
-
-// paxHeader formats a single pax record, prefixing it with the appropriate length
-func paxHeader(msg string) string {
- const padding = 2 // Extra padding for space and newline
- size := len(msg) + padding
- size += len(strconv.Itoa(size))
- record := fmt.Sprintf("%d %s\n", size, msg)
- if len(record) != size {
- // Final adjustment if adding size increased
- // the number of digits in size
- size = len(record)
- record = fmt.Sprintf("%d %s\n", size, msg)
- }
- return record
-}
-
-// Write writes to the current entry in the tar archive.
-// Write returns the error ErrWriteTooLong if more than
-// hdr.Size bytes are written after WriteHeader.
-func (tw *Writer) Write(b []byte) (n int, err error) {
- if tw.closed {
- err = ErrWriteTooLong
- return
- }
- overwrite := false
- if int64(len(b)) > tw.nb {
- b = b[0:tw.nb]
- overwrite = true
- }
- n, err = tw.w.Write(b)
- tw.nb -= int64(n)
- if err == nil && overwrite {
- err = ErrWriteTooLong
- return
- }
- tw.err = err
- return
-}
-
-// Close closes the tar archive, flushing any unwritten
-// data to the underlying writer.
-func (tw *Writer) Close() error {
- if tw.err != nil || tw.closed {
- return tw.err
- }
- tw.Flush()
- tw.closed = true
- if tw.err != nil {
- return tw.err
- }
-
- // trailer: two zero blocks
- for i := 0; i < 2; i++ {
- _, tw.err = tw.w.Write(zeroBlock)
- if tw.err != nil {
- break
- }
- }
- return tw.err
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
deleted file mode 100644
index 5e42e322f9..0000000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
+++ /dev/null
@@ -1,491 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "reflect"
- "strings"
- "testing"
- "testing/iotest"
- "time"
-)
-
-type writerTestEntry struct {
- header *Header
- contents string
-}
-
-type writerTest struct {
- file string // filename of expected output
- entries []*writerTestEntry
-}
-
-var writerTests = []*writerTest{
- // The writer test file was produced with this command:
- // tar (GNU tar) 1.26
- // ln -s small.txt link.txt
- // tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
- {
- file: "testdata/writer.tar",
- entries: []*writerTestEntry{
- {
- header: &Header{
- Name: "small.txt",
- Mode: 0640,
- Uid: 73025,
- Gid: 5000,
- Size: 5,
- ModTime: time.Unix(1246508266, 0),
- Typeflag: '0',
- Uname: "dsymonds",
- Gname: "eng",
- },
- contents: "Kilts",
- },
- {
- header: &Header{
- Name: "small2.txt",
- Mode: 0640,
- Uid: 73025,
- Gid: 5000,
- Size: 11,
- ModTime: time.Unix(1245217492, 0),
- Typeflag: '0',
- Uname: "dsymonds",
- Gname: "eng",
- },
- contents: "Google.com\n",
- },
- {
- header: &Header{
- Name: "link.txt",
- Mode: 0777,
- Uid: 1000,
- Gid: 1000,
- Size: 0,
- ModTime: time.Unix(1314603082, 0),
- Typeflag: '2',
- Linkname: "small.txt",
- Uname: "strings",
- Gname: "strings",
- },
- // no contents
- },
- },
- },
- // The truncated test file was produced using these commands:
- // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
- // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
- {
- file: "testdata/writer-big.tar",
- entries: []*writerTestEntry{
- {
- header: &Header{
- Name: "tmp/16gig.txt",
- Mode: 0640,
- Uid: 73025,
- Gid: 5000,
- Size: 16 << 30,
- ModTime: time.Unix(1254699560, 0),
- Typeflag: '0',
- Uname: "dsymonds",
- Gname: "eng",
- },
- // fake contents
- contents: strings.Repeat("\x00", 4<<10),
- },
- },
- },
- // The truncated test file was produced using these commands:
- // dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt
- // tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar
- {
- file: "testdata/writer-big-long.tar",
- entries: []*writerTestEntry{
- {
- header: &Header{
- Name: strings.Repeat("longname/", 15) + "16gig.txt",
- Mode: 0644,
- Uid: 1000,
- Gid: 1000,
- Size: 16 << 30,
- ModTime: time.Unix(1399583047, 0),
- Typeflag: '0',
- Uname: "guillaume",
- Gname: "guillaume",
- },
- // fake contents
- contents: strings.Repeat("\x00", 4<<10),
- },
- },
- },
- // This file was produced using gnu tar 1.17
- // gnutar -b 4 --format=ustar (longname/)*15 + file.txt
- {
- file: "testdata/ustar.tar",
- entries: []*writerTestEntry{
- {
- header: &Header{
- Name: strings.Repeat("longname/", 15) + "file.txt",
- Mode: 0644,
- Uid: 0765,
- Gid: 024,
- Size: 06,
- ModTime: time.Unix(1360135598, 0),
- Typeflag: '0',
- Uname: "shane",
- Gname: "staff",
- },
- contents: "hello\n",
- },
- },
- },
-}
-
-// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection.
-func bytestr(offset int, b []byte) string {
- const rowLen = 32
- s := fmt.Sprintf("%04x ", offset)
- for _, ch := range b {
- switch {
- case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
- s += fmt.Sprintf(" %c", ch)
- default:
- s += fmt.Sprintf(" %02x", ch)
- }
- }
- return s
-}
-
-// Render a pseudo-diff between two blocks of bytes.
-func bytediff(a []byte, b []byte) string {
- const rowLen = 32
- s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
- for offset := 0; len(a)+len(b) > 0; offset += rowLen {
- na, nb := rowLen, rowLen
- if na > len(a) {
- na = len(a)
- }
- if nb > len(b) {
- nb = len(b)
- }
- sa := bytestr(offset, a[0:na])
- sb := bytestr(offset, b[0:nb])
- if sa != sb {
- s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
- }
- a = a[na:]
- b = b[nb:]
- }
- return s
-}
-
-func TestWriter(t *testing.T) {
-testLoop:
- for i, test := range writerTests {
- expected, err := ioutil.ReadFile(test.file)
- if err != nil {
- t.Errorf("test %d: Unexpected error: %v", i, err)
- continue
- }
-
- buf := new(bytes.Buffer)
- tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
- big := false
- for j, entry := range test.entries {
- big = big || entry.header.Size > 1<<10
- if err := tw.WriteHeader(entry.header); err != nil {
- t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
- continue testLoop
- }
- if _, err := io.WriteString(tw, entry.contents); err != nil {
- t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
- continue testLoop
- }
- }
- // Only interested in Close failures for the small tests.
- if err := tw.Close(); err != nil && !big {
- t.Errorf("test %d: Failed closing archive: %v", i, err)
- continue testLoop
- }
-
- actual := buf.Bytes()
- if !bytes.Equal(expected, actual) {
- t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
- i, bytediff(expected, actual))
- }
- if testing.Short() { // The second test is expensive.
- break
- }
- }
-}
-
-func TestPax(t *testing.T) {
- // Create an archive with a large name
- fileinfo, err := os.Stat("testdata/small.txt")
- if err != nil {
- t.Fatal(err)
- }
- hdr, err := FileInfoHeader(fileinfo, "")
- if err != nil {
- t.Fatalf("os.Stat: %v", err)
- }
- // Force a PAX long name to be written
- longName := strings.Repeat("ab", 100)
- contents := strings.Repeat(" ", int(hdr.Size))
- hdr.Name = longName
- var buf bytes.Buffer
- writer := NewWriter(&buf)
- if err := writer.WriteHeader(hdr); err != nil {
- t.Fatal(err)
- }
- if _, err = writer.Write([]byte(contents)); err != nil {
- t.Fatal(err)
- }
- if err := writer.Close(); err != nil {
- t.Fatal(err)
- }
- // Simple test to make sure PAX extensions are in effect
- if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
- t.Fatal("Expected at least one PAX header to be written.")
- }
- // Test that we can get a long name back out of the archive.
- reader := NewReader(&buf)
- hdr, err = reader.Next()
- if err != nil {
- t.Fatal(err)
- }
- if hdr.Name != longName {
- t.Fatal("Couldn't recover long file name")
- }
-}
-
-func TestPaxSymlink(t *testing.T) {
- // Create an archive with a large linkname
- fileinfo, err := os.Stat("testdata/small.txt")
- if err != nil {
- t.Fatal(err)
- }
- hdr, err := FileInfoHeader(fileinfo, "")
- hdr.Typeflag = TypeSymlink
- if err != nil {
- t.Fatalf("os.Stat:1 %v", err)
- }
- // Force a PAX long linkname to be written
- longLinkname := strings.Repeat("1234567890/1234567890", 10)
- hdr.Linkname = longLinkname
-
- hdr.Size = 0
- var buf bytes.Buffer
- writer := NewWriter(&buf)
- if err := writer.WriteHeader(hdr); err != nil {
- t.Fatal(err)
- }
- if err := writer.Close(); err != nil {
- t.Fatal(err)
- }
- // Simple test to make sure PAX extensions are in effect
- if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
- t.Fatal("Expected at least one PAX header to be written.")
- }
- // Test that we can get a long name back out of the archive.
- reader := NewReader(&buf)
- hdr, err = reader.Next()
- if err != nil {
- t.Fatal(err)
- }
- if hdr.Linkname != longLinkname {
- t.Fatal("Couldn't recover long link name")
- }
-}
-
-func TestPaxNonAscii(t *testing.T) {
- // Create an archive with non ascii. These should trigger a pax header
- // because pax headers have a defined utf-8 encoding.
- fileinfo, err := os.Stat("testdata/small.txt")
- if err != nil {
- t.Fatal(err)
- }
-
- hdr, err := FileInfoHeader(fileinfo, "")
- if err != nil {
- t.Fatalf("os.Stat:1 %v", err)
- }
-
- // some sample data
- chineseFilename := "文件名"
- chineseGroupname := "組"
- chineseUsername := "用戶名"
-
- hdr.Name = chineseFilename
- hdr.Gname = chineseGroupname
- hdr.Uname = chineseUsername
-
- contents := strings.Repeat(" ", int(hdr.Size))
-
- var buf bytes.Buffer
- writer := NewWriter(&buf)
- if err := writer.WriteHeader(hdr); err != nil {
- t.Fatal(err)
- }
- if _, err = writer.Write([]byte(contents)); err != nil {
- t.Fatal(err)
- }
- if err := writer.Close(); err != nil {
- t.Fatal(err)
- }
- // Simple test to make sure PAX extensions are in effect
- if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
- t.Fatal("Expected at least one PAX header to be written.")
- }
- // Test that we can get a long name back out of the archive.
- reader := NewReader(&buf)
- hdr, err = reader.Next()
- if err != nil {
- t.Fatal(err)
- }
- if hdr.Name != chineseFilename {
- t.Fatal("Couldn't recover unicode name")
- }
- if hdr.Gname != chineseGroupname {
- t.Fatal("Couldn't recover unicode group")
- }
- if hdr.Uname != chineseUsername {
- t.Fatal("Couldn't recover unicode user")
- }
-}
-
-func TestPaxXattrs(t *testing.T) {
- xattrs := map[string]string{
- "user.key": "value",
- }
-
- // Create an archive with an xattr
- fileinfo, err := os.Stat("testdata/small.txt")
- if err != nil {
- t.Fatal(err)
- }
- hdr, err := FileInfoHeader(fileinfo, "")
- if err != nil {
- t.Fatalf("os.Stat: %v", err)
- }
- contents := "Kilts"
- hdr.Xattrs = xattrs
- var buf bytes.Buffer
- writer := NewWriter(&buf)
- if err := writer.WriteHeader(hdr); err != nil {
- t.Fatal(err)
- }
- if _, err = writer.Write([]byte(contents)); err != nil {
- t.Fatal(err)
- }
- if err := writer.Close(); err != nil {
- t.Fatal(err)
- }
- // Test that we can get the xattrs back out of the archive.
- reader := NewReader(&buf)
- hdr, err = reader.Next()
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
- t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
- hdr.Xattrs, xattrs)
- }
-}
-
-func TestPAXHeader(t *testing.T) {
- medName := strings.Repeat("CD", 50)
- longName := strings.Repeat("AB", 100)
- paxTests := [][2]string{
- {paxPath + "=/etc/hosts", "19 path=/etc/hosts\n"},
- {"a=b", "6 a=b\n"}, // Single digit length
- {"a=names", "11 a=names\n"}, // Test case involving carries
- {paxPath + "=" + longName, fmt.Sprintf("210 path=%s\n", longName)},
- {paxPath + "=" + medName, fmt.Sprintf("110 path=%s\n", medName)}}
-
- for _, test := range paxTests {
- key, expected := test[0], test[1]
- if result := paxHeader(key); result != expected {
- t.Fatalf("paxHeader: got %s, expected %s", result, expected)
- }
- }
-}
-
-func TestUSTARLongName(t *testing.T) {
- // Create an archive with a path that failed to split with USTAR extension in previous versions.
- fileinfo, err := os.Stat("testdata/small.txt")
- if err != nil {
- t.Fatal(err)
- }
- hdr, err := FileInfoHeader(fileinfo, "")
- hdr.Typeflag = TypeDir
- if err != nil {
- t.Fatalf("os.Stat:1 %v", err)
- }
- // Force a PAX long name to be written. The name was taken from a practical example
- // that fails and replaced ever char through numbers to anonymize the sample.
- longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
- hdr.Name = longName
-
- hdr.Size = 0
- var buf bytes.Buffer
- writer := NewWriter(&buf)
- if err := writer.WriteHeader(hdr); err != nil {
- t.Fatal(err)
- }
- if err := writer.Close(); err != nil {
- t.Fatal(err)
- }
- // Test that we can get a long name back out of the archive.
- reader := NewReader(&buf)
- hdr, err = reader.Next()
- if err != nil {
- t.Fatal(err)
- }
- if hdr.Name != longName {
- t.Fatal("Couldn't recover long name")
- }
-}
-
-func TestValidTypeflagWithPAXHeader(t *testing.T) {
- var buffer bytes.Buffer
- tw := NewWriter(&buffer)
-
- fileName := strings.Repeat("ab", 100)
-
- hdr := &Header{
- Name: fileName,
- Size: 4,
- Typeflag: 0,
- }
- if err := tw.WriteHeader(hdr); err != nil {
- t.Fatalf("Failed to write header: %s", err)
- }
- if _, err := tw.Write([]byte("fooo")); err != nil {
- t.Fatalf("Failed to write the file's data: %s", err)
- }
- tw.Close()
-
- tr := NewReader(&buffer)
-
- for {
- header, err := tr.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- t.Fatalf("Failed to read header: %s", err)
- }
- if header.Typeflag != 0 {
- t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
- }
- }
-}
diff --git a/vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md
index 566a6fbd9d..eb72bff93b 100644
--- a/vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md
+++ b/vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -1,3 +1,7 @@
+# 0.7.3
+
+formatter/\*: allow configuration of timestamp layout
+
# 0.7.2
formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/src/github.com/Sirupsen/logrus/README.md b/vendor/src/github.com/Sirupsen/logrus/README.md
index bf09541e83..d55f909247 100644
--- a/vendor/src/github.com/Sirupsen/logrus/README.md
+++ b/vendor/src/github.com/Sirupsen/logrus/README.md
@@ -108,6 +108,16 @@ func main() {
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
}
```
@@ -189,31 +199,18 @@ func init() {
}
```
-* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go)
- Send errors to an exception tracking service compatible with the Airbrake API.
- Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.
-* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
- Send errors to the Papertrail hosted logging service via UDP.
-
-* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
- Send errors to remote syslog server.
- Uses standard library `log/syslog` behind the scenes.
-
-* [`github.com/Sirupsen/logrus/hooks/bugsnag`](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go)
- Send errors to the Bugsnag exception tracking service.
-
-* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
- Send errors to a channel in hipchat.
-
-* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly)
- Send logs to Loggly (https://www.loggly.com/)
-
-* [`github.com/johntdyer/slackrus`](https://github.com/johntdyer/slackrus)
- Hook for Slack chat.
-
-* [`github.com/wercker/journalhook`](https://github.com/wercker/journalhook).
- Hook for logging to `systemd-journald`.
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
#### Level logging
diff --git a/vendor/src/github.com/Sirupsen/logrus/formatter.go b/vendor/src/github.com/Sirupsen/logrus/formatter.go
index 038ce9fd29..104d689f18 100644
--- a/vendor/src/github.com/Sirupsen/logrus/formatter.go
+++ b/vendor/src/github.com/Sirupsen/logrus/formatter.go
@@ -1,5 +1,9 @@
package logrus
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
diff --git a/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
index 34b1ccbca6..8ea93ddf20 100644
--- a/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
+++ b/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
@@ -3,19 +3,27 @@ package logstash
import (
"encoding/json"
"fmt"
+
"github.com/Sirupsen/logrus"
- "time"
)
// Formatter generates json in logstash format.
// Logstash site: http://logstash.net/
type LogstashFormatter struct {
Type string // if not empty use for logstash type field.
+
+ // TimestampFormat sets the format used for timestamps.
+ TimestampFormat string
}
func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) {
entry.Data["@version"] = 1
- entry.Data["@timestamp"] = entry.Time.Format(time.RFC3339)
+
+ if f.TimestampFormat == "" {
+ f.TimestampFormat = logrus.DefaultTimestampFormat
+ }
+
+ entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat)
// set message field
v, ok := entry.Data["message"]
diff --git a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go
index 5c4c44bbe5..dcc4f1d9fd 100644
--- a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go
+++ b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go
@@ -3,10 +3,12 @@ package logrus
import (
"encoding/json"
"fmt"
- "time"
)
-type JSONFormatter struct{}
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+}
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
@@ -21,7 +23,12 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
}
}
prefixFieldClashes(data)
- data["time"] = entry.Time.Format(time.RFC3339)
+
+ if f.TimestampFormat == "" {
+ f.TimestampFormat = DefaultTimestampFormat
+ }
+
+ data["time"] = entry.Time.Format(f.TimestampFormat)
data["msg"] = entry.Message
data["level"] = entry.Level.String()
diff --git a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go
index d3687ba25c..612417ff9c 100644
--- a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go
+++ b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go
@@ -18,9 +18,8 @@ const (
)
var (
- baseTimestamp time.Time
- isTerminal bool
- defaultTimestampFormat = time.RFC3339
+ baseTimestamp time.Time
+ isTerminal bool
)
func init() {
@@ -47,7 +46,7 @@ type TextFormatter struct {
// the time passed since beginning of execution.
FullTimestamp bool
- // Timestamp format to use for display, if a full timestamp is printed
+ // TimestampFormat to use for display when a full timestamp is printed
TimestampFormat string
// The fields are sorted by default for a consistent output. For applications
@@ -73,7 +72,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
isColored := (f.ForceColors || isTerminal) && !f.DisableColors
if f.TimestampFormat == "" {
- f.TimestampFormat = defaultTimestampFormat
+ f.TimestampFormat = DefaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys)
diff --git a/vendor/src/github.com/docker/libcontainer/.gitignore b/vendor/src/github.com/docker/libcontainer/.gitignore
index bf6a664db6..2e3f79b4eb 100644
--- a/vendor/src/github.com/docker/libcontainer/.gitignore
+++ b/vendor/src/github.com/docker/libcontainer/.gitignore
@@ -1,2 +1,3 @@
bundles
nsinit/nsinit
+vendor/pkg
diff --git a/vendor/src/github.com/docker/libcontainer/SPEC.md b/vendor/src/github.com/docker/libcontainer/SPEC.md
index d83d758ddd..5d37fe935a 100644
--- a/vendor/src/github.com/docker/libcontainer/SPEC.md
+++ b/vendor/src/github.com/docker/libcontainer/SPEC.md
@@ -15,7 +15,7 @@ with a strong security configuration.
### System Requirements and Compatibility
Minimum requirements:
-* Kernel version - 3.8 recommended 2.6.2x minimum(with backported patches)
+* Kernel version - 3.10 recommended; 2.6.2x minimum (with backported patches)
* Mounted cgroups with each subsystem in its own hierarchy
@@ -28,11 +28,9 @@ Minimum requirements:
| CLONE_NEWIPC | 1 |
| CLONE_NEWNET | 1 |
| CLONE_NEWNS | 1 |
-| CLONE_NEWUSER | 0 |
+| CLONE_NEWUSER | 1 |
-In v1 the user namespace is not enabled by default for support of older kernels
-where the user namespace feature is not fully implemented. Namespaces are
-created for the container via the `clone` syscall.
+Namespaces are created for the container via the `clone` syscall.
### Filesystem
@@ -143,6 +141,7 @@ system resources like cpu, memory, and device access.
| blkio | 1 |
| perf_event | 1 |
| freezer | 1 |
+| hugetlb | 1 |
All cgroup subsystem are joined so that statistics can be collected from
@@ -165,6 +164,7 @@ provide a good default for security and flexibility for the applications.
| -------------------- | ------- |
| CAP_NET_RAW | 1 |
| CAP_NET_BIND_SERVICE | 1 |
+| CAP_AUDIT_READ | 1 |
| CAP_AUDIT_WRITE | 1 |
| CAP_DAC_OVERRIDE | 1 |
| CAP_SETFCAP | 1 |
@@ -217,17 +217,6 @@ profile flags=(attach_disconnected,mediate_deleted) {
file,
umount,
- mount fstype=tmpfs,
- mount fstype=mqueue,
- mount fstype=fuse.*,
- mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/,
- mount fstype=efivarfs -> /sys/firmware/efi/efivars/,
- mount fstype=fusectl -> /sys/fs/fuse/connections/,
- mount fstype=securityfs -> /sys/kernel/security/,
- mount fstype=debugfs -> /sys/kernel/debug/,
- mount fstype=proc -> /proc/,
- mount fstype=sysfs -> /sys/,
-
deny @{PROC}/sys/fs/** wklx,
deny @{PROC}/sysrq-trigger rwklx,
deny @{PROC}/mem rwklx,
@@ -235,9 +224,7 @@ profile flags=(attach_disconnected,mediate_deleted) {
deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
deny @{PROC}/sys/kernel/*/** wklx,
- deny mount options=(ro, remount) -> /,
- deny mount fstype=debugfs -> /var/lib/ureadahead/debugfs/,
- deny mount fstype=devpts,
+ deny mount,
deny /sys/[^f]*/** wklx,
deny /sys/f[^s]*/** wklx,
@@ -317,6 +304,7 @@ a container.
| Pause | Pause all processes inside the container |
| Resume | Resume all processes inside the container if paused |
| Exec | Execute a new process inside of the container ( requires setns ) |
+| Set | Setup configs of the container after it's created |
### Execute a new process inside of a running container.
diff --git a/vendor/src/github.com/docker/libcontainer/apparmor/apparmor.go b/vendor/src/github.com/docker/libcontainer/apparmor/apparmor.go
index 3be3294d85..18cedf6a19 100644
--- a/vendor/src/github.com/docker/libcontainer/apparmor/apparmor.go
+++ b/vendor/src/github.com/docker/libcontainer/apparmor/apparmor.go
@@ -14,8 +14,10 @@ import (
func IsEnabled() bool {
if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" {
- buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
- return err == nil && len(buf) > 1 && buf[0] == 'Y'
+ if _, err = os.Stat("/sbin/apparmor_parser"); err == nil {
+ buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
+ return err == nil && len(buf) > 1 && buf[0] == 'Y'
+ }
}
return false
}
diff --git a/vendor/src/github.com/docker/libcontainer/apparmor/gen.go b/vendor/src/github.com/docker/libcontainer/apparmor/gen.go
index 4565f6dfec..a3192e23b2 100644
--- a/vendor/src/github.com/docker/libcontainer/apparmor/gen.go
+++ b/vendor/src/github.com/docker/libcontainer/apparmor/gen.go
@@ -27,17 +27,6 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
file,
umount,
- mount fstype=tmpfs,
- mount fstype=mqueue,
- mount fstype=fuse.*,
- mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/,
- mount fstype=efivarfs -> /sys/firmware/efi/efivars/,
- mount fstype=fusectl -> /sys/fs/fuse/connections/,
- mount fstype=securityfs -> /sys/kernel/security/,
- mount fstype=debugfs -> /sys/kernel/debug/,
- mount fstype=proc -> /proc/,
- mount fstype=sysfs -> /sys/,
-
deny @{PROC}/sys/fs/** wklx,
deny @{PROC}/sysrq-trigger rwklx,
deny @{PROC}/mem rwklx,
@@ -45,9 +34,7 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
deny @{PROC}/sys/kernel/*/** wklx,
- deny mount options=(ro, remount) -> /,
- deny mount fstype=debugfs -> /var/lib/ureadahead/debugfs/,
- deny mount fstype=devpts,
+ deny mount,
deny /sys/[^f]*/** wklx,
deny /sys/f[^s]*/** wklx,
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go
index 0a2d76bcd4..99c7845745 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go
@@ -1,6 +1,8 @@
package fs
import (
+ "fmt"
+ "io"
"io/ioutil"
"os"
"path/filepath"
@@ -19,6 +21,7 @@ var (
"cpuset": &CpusetGroup{},
"cpuacct": &CpuacctGroup{},
"blkio": &BlkioGroup{},
+ "hugetlb": &HugetlbGroup{},
"perf_event": &PerfEventGroup{},
"freezer": &FreezerGroup{},
}
@@ -75,10 +78,13 @@ type data struct {
}
func (m *Manager) Apply(pid int) error {
+
if m.Cgroups == nil {
return nil
}
+ var c = m.Cgroups
+
d, err := getCgroupData(m.Cgroups, pid)
if err != nil {
return err
@@ -108,6 +114,12 @@ func (m *Manager) Apply(pid int) error {
}
m.Paths = paths
+ if paths["cpu"] != "" {
+ if err := CheckCpushares(paths["cpu"], c.CpuShares); err != nil {
+ return err
+ }
+ }
+
return nil
}
@@ -119,19 +131,6 @@ func (m *Manager) GetPaths() map[string]string {
return m.Paths
}
-// Symmetrical public function to update device based cgroups. Also available
-// in the systemd implementation.
-func ApplyDevices(c *configs.Cgroup, pid int) error {
- d, err := getCgroupData(c, pid)
- if err != nil {
- return err
- }
-
- devices := subsystems["devices"]
-
- return devices.Apply(d)
-}
-
func (m *Manager) GetStats() (*cgroups.Stats, error) {
stats := cgroups.NewStats()
for name, path := range m.Paths {
@@ -263,6 +262,11 @@ func (raw *data) join(subsystem string) (string, error) {
}
func writeFile(dir, file, data string) error {
+	// Normally dir should not be empty; one case where it is empty is when the
+	// cgroup subsystem is not mounted, and we want the write to fail here.
+ if dir == "" {
+ return fmt.Errorf("no such directory for %s.", file)
+ }
return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
}
@@ -280,3 +284,27 @@ func removePath(p string, err error) error {
}
return nil
}
+
+func CheckCpushares(path string, c int64) error {
+ var cpuShares int64
+
+ fd, err := os.Open(filepath.Join(path, "cpu.shares"))
+ if err != nil {
+ return err
+ }
+ defer fd.Close()
+
+ _, err = fmt.Fscanf(fd, "%d", &cpuShares)
+ if err != nil && err != io.EOF {
+ return err
+ }
+ if c != 0 {
+ if c > cpuShares {
+ return fmt.Errorf("The maximum allowed cpu-shares is %d", cpuShares)
+ } else if c < cpuShares {
+ return fmt.Errorf("The minimum allowed cpu-shares is %d", cpuShares)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go
index 8e132643bb..06f0a3b2cd 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go
@@ -35,6 +35,32 @@ func (s *BlkioGroup) Set(path string, cgroup *configs.Cgroup) error {
}
}
+ if cgroup.BlkioWeightDevice != "" {
+ if err := writeFile(path, "blkio.weight_device", cgroup.BlkioWeightDevice); err != nil {
+ return err
+ }
+ }
+ if cgroup.BlkioThrottleReadBpsDevice != "" {
+ if err := writeFile(path, "blkio.throttle.read_bps_device", cgroup.BlkioThrottleReadBpsDevice); err != nil {
+ return err
+ }
+ }
+ if cgroup.BlkioThrottleWriteBpsDevice != "" {
+ if err := writeFile(path, "blkio.throttle.write_bps_device", cgroup.BlkioThrottleWriteBpsDevice); err != nil {
+ return err
+ }
+ }
+ if cgroup.BlkioThrottleReadIOpsDevice != "" {
+ if err := writeFile(path, "blkio.throttle.read_iops_device", cgroup.BlkioThrottleReadIOpsDevice); err != nil {
+ return err
+ }
+ }
+ if cgroup.BlkioThrottleWriteIOpsDevice != "" {
+ if err := writeFile(path, "blkio.throttle.write_iops_device", cgroup.BlkioThrottleWriteIOpsDevice); err != nil {
+ return err
+ }
+ }
+
return nil
}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go
index 9ef93fcff2..9d0915da32 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go
@@ -67,6 +67,8 @@ Total 22061056`
252:0 Async 164
252:0 Total 164
Total 328`
+ throttleBefore = `8:0 1024`
+ throttleAfter = `8:0 2048`
)
func appendBlkioStatEntry(blkioStatEntries *[]cgroups.BlkioStatEntry, major, minor, value uint64, op string) {
@@ -102,6 +104,35 @@ func TestBlkioSetWeight(t *testing.T) {
}
}
+func TestBlkioSetWeightDevice(t *testing.T) {
+ helper := NewCgroupTestUtil("blkio", t)
+ defer helper.cleanup()
+
+ const (
+ weightDeviceBefore = "8:0 400"
+ weightDeviceAfter = "8:0 500"
+ )
+
+ helper.writeFileContents(map[string]string{
+ "blkio.weight_device": weightDeviceBefore,
+ })
+
+ helper.CgroupData.c.BlkioWeightDevice = weightDeviceAfter
+ blkio := &BlkioGroup{}
+ if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
+ t.Fatal(err)
+ }
+
+ value, err := getCgroupParamString(helper.CgroupPath, "blkio.weight_device")
+ if err != nil {
+ t.Fatalf("Failed to parse blkio.weight_device - %s", err)
+ }
+
+ if value != weightDeviceAfter {
+ t.Fatal("Got the wrong value, set blkio.weight_device failed.")
+ }
+}
+
func TestBlkioStats(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
@@ -442,3 +473,96 @@ func TestNonCFQBlkioStats(t *testing.T) {
expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
}
+
+func TestBlkioSetThrottleReadBpsDevice(t *testing.T) {
+ helper := NewCgroupTestUtil("blkio", t)
+ defer helper.cleanup()
+
+ helper.writeFileContents(map[string]string{
+ "blkio.throttle.read_bps_device": throttleBefore,
+ })
+
+ helper.CgroupData.c.BlkioThrottleReadBpsDevice = throttleAfter
+ blkio := &BlkioGroup{}
+ if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
+ t.Fatal(err)
+ }
+
+ value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.read_bps_device")
+ if err != nil {
+ t.Fatalf("Failed to parse blkio.throttle.read_bps_device - %s", err)
+ }
+
+ if value != throttleAfter {
+ t.Fatal("Got the wrong value, set blkio.throttle.read_bps_device failed.")
+ }
+}
+func TestBlkioSetThrottleWriteBpsDevice(t *testing.T) {
+ helper := NewCgroupTestUtil("blkio", t)
+ defer helper.cleanup()
+
+ helper.writeFileContents(map[string]string{
+ "blkio.throttle.write_bps_device": throttleBefore,
+ })
+
+ helper.CgroupData.c.BlkioThrottleWriteBpsDevice = throttleAfter
+ blkio := &BlkioGroup{}
+ if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
+ t.Fatal(err)
+ }
+
+ value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.write_bps_device")
+ if err != nil {
+ t.Fatalf("Failed to parse blkio.throttle.write_bps_device - %s", err)
+ }
+
+ if value != throttleAfter {
+ t.Fatal("Got the wrong value, set blkio.throttle.write_bps_device failed.")
+ }
+}
+func TestBlkioSetThrottleReadIOpsDevice(t *testing.T) {
+ helper := NewCgroupTestUtil("blkio", t)
+ defer helper.cleanup()
+
+ helper.writeFileContents(map[string]string{
+ "blkio.throttle.read_iops_device": throttleBefore,
+ })
+
+ helper.CgroupData.c.BlkioThrottleReadIOpsDevice = throttleAfter
+ blkio := &BlkioGroup{}
+ if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
+ t.Fatal(err)
+ }
+
+ value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.read_iops_device")
+ if err != nil {
+ t.Fatalf("Failed to parse blkio.throttle.read_iops_device - %s", err)
+ }
+
+ if value != throttleAfter {
+ t.Fatal("Got the wrong value, set blkio.throttle.read_iops_device failed.")
+ }
+}
+func TestBlkioSetThrottleWriteIOpsDevice(t *testing.T) {
+ helper := NewCgroupTestUtil("blkio", t)
+ defer helper.cleanup()
+
+ helper.writeFileContents(map[string]string{
+ "blkio.throttle.write_iops_device": throttleBefore,
+ })
+
+ helper.CgroupData.c.BlkioThrottleWriteIOpsDevice = throttleAfter
+ blkio := &BlkioGroup{}
+ if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
+ t.Fatal(err)
+ }
+
+ value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.write_iops_device")
+ if err != nil {
+ t.Fatalf("Failed to parse blkio.throttle.write_iops_device - %s", err)
+ }
+
+ if value != throttleAfter {
+ t.Fatal("Got the wrong value, set blkio.throttle.write_iops_device failed.")
+ }
+}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go
index 1fbf7b1540..c9d4ad1a16 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go
@@ -17,7 +17,7 @@ func (s *CpuGroup) Apply(d *data) error {
// We always want to join the cpu group, to allow fair cpu scheduling
// on a container basis
dir, err := d.join("cpu")
- if err != nil {
+ if err != nil && !cgroups.IsNotFound(err) {
return err
}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go
index d8465a666b..6ad42a5838 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go
@@ -16,7 +16,7 @@ type CpusetGroup struct {
func (s *CpusetGroup) Apply(d *data) error {
dir, err := d.path("cpuset")
- if err != nil {
+ if err != nil && !cgroups.IsNotFound(err) {
return err
}
@@ -48,6 +48,11 @@ func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error {
}
func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) error {
+ // This might happen if we have no cpuset cgroup mounted.
+ // Just do nothing and don't fail.
+ if dir == "" {
+ return nil
+ }
if err := s.ensureParent(dir); err != nil {
return err
}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go
index 16e00b1c73..09ce92ef24 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go
@@ -11,6 +11,8 @@ type DevicesGroup struct {
func (s *DevicesGroup) Apply(d *data) error {
dir, err := d.join("devices")
if err != nil {
+		// We will return an error even if it is a `not found` error, because
+		// the devices cgroup is a hard requirement for container security.
return err
}
@@ -32,6 +34,17 @@ func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
return err
}
}
+ return nil
+ }
+
+ if err := writeFile(path, "devices.allow", "a"); err != nil {
+ return err
+ }
+
+ for _, dev := range cgroup.DeniedDevices {
+ if err := writeFile(path, "devices.deny", dev.CgroupString()); err != nil {
+ return err
+ }
}
return nil
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices_test.go
index 18bb127462..f950c1b9cf 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices_test.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices_test.go
@@ -17,7 +17,18 @@ var (
FileMode: 0666,
},
}
- allowedList = "c 1:5 rwm"
+ allowedList = "c 1:5 rwm"
+ deniedDevices = []*configs.Device{
+ {
+ Path: "/dev/null",
+ Type: 'c',
+ Major: 1,
+ Minor: 3,
+ Permissions: "rwm",
+ FileMode: 0666,
+ },
+ }
+ deniedList = "c 1:3 rwm"
)
func TestDevicesSetAllow(t *testing.T) {
@@ -44,3 +55,28 @@ func TestDevicesSetAllow(t *testing.T) {
t.Fatal("Got the wrong value, set devices.allow failed.")
}
}
+
+func TestDevicesSetDeny(t *testing.T) {
+ helper := NewCgroupTestUtil("devices", t)
+ defer helper.cleanup()
+
+ helper.writeFileContents(map[string]string{
+ "devices.allow": "a",
+ })
+
+ helper.CgroupData.c.AllowAllDevices = true
+ helper.CgroupData.c.DeniedDevices = deniedDevices
+ devices := &DevicesGroup{}
+ if err := devices.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
+ t.Fatal(err)
+ }
+
+ value, err := getCgroupParamString(helper.CgroupPath, "devices.deny")
+ if err != nil {
+ t.Fatalf("Failed to parse devices.deny - %s", err)
+ }
+
+ if value != deniedList {
+ t.Fatal("Got the wrong value, set devices.deny failed.")
+ }
+}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/hugetlb.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/hugetlb.go
new file mode 100644
index 0000000000..8defdd1b91
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/hugetlb.go
@@ -0,0 +1,29 @@
+package fs
+
+import (
+ "github.com/docker/libcontainer/cgroups"
+ "github.com/docker/libcontainer/configs"
+)
+
+type HugetlbGroup struct {
+}
+
+func (s *HugetlbGroup) Apply(d *data) error {
+ // we just want to join this group even though we don't set anything
+ if _, err := d.join("hugetlb"); err != nil && !cgroups.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func (s *HugetlbGroup) Set(path string, cgroup *configs.Cgroup) error {
+ return nil
+}
+
+func (s *HugetlbGroup) Remove(d *data) error {
+ return removePath(d.path("hugetlb"))
+}
+
+func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error {
+ return nil
+}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go
index b99f81687a..2dcef0f44c 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go
@@ -16,8 +16,7 @@ type MemoryGroup struct {
func (s *MemoryGroup) Apply(d *data) error {
dir, err := d.join("memory")
- // only return an error for memory if it was specified
- if err != nil && (d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0) {
+ if err != nil && !cgroups.IsNotFound(err) {
return err
}
defer func() {
@@ -95,6 +94,7 @@ func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error {
return fmt.Errorf("failed to parse memory.usage_in_bytes - %v", err)
}
stats.MemoryStats.Usage = value
+ stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"]
value, err = getCgroupParamUint(path, "memory.max_usage_in_bytes")
if err != nil {
return fmt.Errorf("failed to parse memory.max_usage_in_bytes - %v", err)
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go
index 1e939c4e88..60edc67a52 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go
@@ -128,7 +128,7 @@ func TestMemoryStats(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- expectedStats := cgroups.MemoryStats{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Stats: map[string]uint64{"cache": 512, "rss": 1024}}
+ expectedStats := cgroups.MemoryStats{Usage: 2048, Cache: 512, MaxUsage: 4096, Failcnt: 100, Stats: map[string]uint64{"cache": 512, "rss": 1024}}
expectMemoryStatEquals(t, expectedStats, actualStats.MemoryStats)
}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go
index c55ba938cb..48e2f3aeda 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go
@@ -2,9 +2,9 @@ package fs
import (
"fmt"
- "log"
"testing"
+ "github.com/Sirupsen/logrus"
"github.com/docker/libcontainer/cgroups"
)
@@ -23,75 +23,75 @@ func blkioStatEntryEquals(expected, actual []cgroups.BlkioStatEntry) error {
func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) {
if err := blkioStatEntryEquals(expected.IoServiceBytesRecursive, actual.IoServiceBytesRecursive); err != nil {
- log.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err)
+ logrus.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err)
t.Fail()
}
if err := blkioStatEntryEquals(expected.IoServicedRecursive, actual.IoServicedRecursive); err != nil {
- log.Printf("blkio IoServicedRecursive do not match - %s\n", err)
+ logrus.Printf("blkio IoServicedRecursive do not match - %s\n", err)
t.Fail()
}
if err := blkioStatEntryEquals(expected.IoQueuedRecursive, actual.IoQueuedRecursive); err != nil {
- log.Printf("blkio IoQueuedRecursive do not match - %s\n", err)
+ logrus.Printf("blkio IoQueuedRecursive do not match - %s\n", err)
t.Fail()
}
if err := blkioStatEntryEquals(expected.SectorsRecursive, actual.SectorsRecursive); err != nil {
- log.Printf("blkio SectorsRecursive do not match - %s\n", err)
+ logrus.Printf("blkio SectorsRecursive do not match - %s\n", err)
t.Fail()
}
if err := blkioStatEntryEquals(expected.IoServiceTimeRecursive, actual.IoServiceTimeRecursive); err != nil {
- log.Printf("blkio IoServiceTimeRecursive do not match - %s\n", err)
+ logrus.Printf("blkio IoServiceTimeRecursive do not match - %s\n", err)
t.Fail()
}
if err := blkioStatEntryEquals(expected.IoWaitTimeRecursive, actual.IoWaitTimeRecursive); err != nil {
- log.Printf("blkio IoWaitTimeRecursive do not match - %s\n", err)
+ logrus.Printf("blkio IoWaitTimeRecursive do not match - %s\n", err)
t.Fail()
}
if err := blkioStatEntryEquals(expected.IoMergedRecursive, actual.IoMergedRecursive); err != nil {
- log.Printf("blkio IoMergedRecursive do not match - %v vs %v\n", expected.IoMergedRecursive, actual.IoMergedRecursive)
+ logrus.Printf("blkio IoMergedRecursive do not match - %v vs %v\n", expected.IoMergedRecursive, actual.IoMergedRecursive)
t.Fail()
}
if err := blkioStatEntryEquals(expected.IoTimeRecursive, actual.IoTimeRecursive); err != nil {
- log.Printf("blkio IoTimeRecursive do not match - %s\n", err)
+ logrus.Printf("blkio IoTimeRecursive do not match - %s\n", err)
t.Fail()
}
}
func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) {
if expected != actual {
- log.Printf("Expected throttling data %v but found %v\n", expected, actual)
+ logrus.Printf("Expected throttling data %v but found %v\n", expected, actual)
t.Fail()
}
}
func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) {
if expected.Usage != actual.Usage {
- log.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage)
+ logrus.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage)
t.Fail()
}
if expected.MaxUsage != actual.MaxUsage {
- log.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage)
+ logrus.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage)
t.Fail()
}
for key, expValue := range expected.Stats {
actValue, ok := actual.Stats[key]
if !ok {
- log.Printf("Expected memory stat key %s not found\n", key)
+ logrus.Printf("Expected memory stat key %s not found\n", key)
t.Fail()
}
if expValue != actValue {
- log.Printf("Expected memory stat value %d but found %d\n", expValue, actValue)
+ logrus.Printf("Expected memory stat value %d but found %d\n", expValue, actValue)
t.Fail()
}
}
if expected.Failcnt != actual.Failcnt {
- log.Printf("Expected memory failcnt %d but found %d\n", expected.Failcnt, actual.Failcnt)
+ logrus.Printf("Expected memory failcnt %d but found %d\n", expected.Failcnt, actual.Failcnt)
t.Fail()
}
}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/stats.go b/vendor/src/github.com/docker/libcontainer/cgroups/stats.go
index dc5dbb3c21..25c8f199cc 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/stats.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/stats.go
@@ -33,6 +33,8 @@ type CpuStats struct {
type MemoryStats struct {
// current res_counter usage for memory
Usage uint64 `json:"usage,omitempty"`
+ // memory used for cache
+ Cache uint64 `json:"cache,omitempty"`
// maximum usage ever recorded.
MaxUsage uint64 `json:"max_usage,omitempty"`
// TODO(vishh): Export these as stronger types.
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go
index 95ed4ea7eb..9b605b3c05 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go
@@ -46,10 +46,6 @@ func (m *Manager) Freeze(state configs.FreezerState) error {
return fmt.Errorf("Systemd not supported")
}
-func ApplyDevices(c *configs.Cgroup, pid int) error {
- return fmt.Errorf("Systemd not supported")
-}
-
func Freeze(c *configs.Cgroup, state configs.FreezerState) error {
return fmt.Errorf("Systemd not supported")
}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go
index 3609bccae6..4fb8d8d5ea 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go
@@ -38,6 +38,7 @@ var subsystems = map[string]subsystem{
"cpuset": &fs.CpusetGroup{},
"cpuacct": &fs.CpuacctGroup{},
"blkio": &fs.BlkioGroup{},
+ "hugetlb": &fs.HugetlbGroup{},
"perf_event": &fs.PerfEventGroup{},
"freezer": &fs.FreezerGroup{},
}
@@ -216,6 +217,13 @@ func (m *Manager) Apply(pid int) error {
return err
}
+ // FIXME: Systemd does have `BlockIODeviceWeight` property, but we got problem
+ // using that (at least on systemd 208, see https://github.com/docker/libcontainer/pull/354),
+ // so use fs work around for now.
+ if err := joinBlkio(c, pid); err != nil {
+ return err
+ }
+
paths := make(map[string]string)
for sysname := range subsystems {
subsystemPath, err := getSubsystemPath(m.Cgroups, sysname)
@@ -228,9 +236,14 @@ func (m *Manager) Apply(pid int) error {
}
paths[sysname] = subsystemPath
}
-
m.Paths = paths
+ if paths["cpu"] != "" {
+ if err := fs.CheckCpushares(paths["cpu"], c.CpuShares); err != nil {
+ return err
+ }
+ }
+
return nil
}
@@ -243,6 +256,11 @@ func (m *Manager) GetPaths() map[string]string {
}
func writeFile(dir, file, data string) error {
+ // Normally dir should not be empty, one case is that cgroup subsystem
+	// is not mounted, we will get empty dir, and we want it to fail here.
+ if dir == "" {
+ return fmt.Errorf("no such directory for %s.", file)
+ }
return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
}
@@ -263,16 +281,16 @@ func join(c *configs.Cgroup, subsystem string, pid int) (string, error) {
func joinCpu(c *configs.Cgroup, pid int) error {
path, err := getSubsystemPath(c, "cpu")
- if err != nil {
+ if err != nil && !cgroups.IsNotFound(err) {
return err
}
if c.CpuQuota != 0 {
- if err = ioutil.WriteFile(filepath.Join(path, "cpu.cfs_quota_us"), []byte(strconv.FormatInt(c.CpuQuota, 10)), 0700); err != nil {
+ if err = writeFile(path, "cpu.cfs_quota_us", strconv.FormatInt(c.CpuQuota, 10)); err != nil {
return err
}
}
if c.CpuPeriod != 0 {
- if err = ioutil.WriteFile(filepath.Join(path, "cpu.cfs_period_us"), []byte(strconv.FormatInt(c.CpuPeriod, 10)), 0700); err != nil {
+ if err = writeFile(path, "cpu.cfs_period_us", strconv.FormatInt(c.CpuPeriod, 10)); err != nil {
return err
}
}
@@ -280,7 +298,7 @@ func joinCpu(c *configs.Cgroup, pid int) error {
}
func joinFreezer(c *configs.Cgroup, pid int) error {
- if _, err := join(c, "freezer", pid); err != nil {
+ if _, err := join(c, "freezer", pid); err != nil && !cgroups.IsNotFound(err) {
return err
}
@@ -350,7 +368,17 @@ func (m *Manager) GetStats() (*cgroups.Stats, error) {
}
func (m *Manager) Set(container *configs.Config) error {
- panic("not implemented")
+ for name, path := range m.Paths {
+ sys, ok := subsystems[name]
+ if !ok || !cgroups.PathExists(path) {
+ continue
+ }
+ if err := sys.Set(path, container.Cgroups); err != nil {
+ return err
+ }
+ }
+
+ return nil
}
func getUnitName(c *configs.Cgroup) string {
@@ -362,7 +390,7 @@ func getUnitName(c *configs.Cgroup) string {
// * Support for wildcards to allow /dev/pts support
//
// The second is available in more recent systemd as "char-pts", but not in e.g. v208 which is
-// in wide use. When both these are availalable we will be able to switch, but need to keep the old
+// in wide use. When both these are available we will be able to switch, but need to keep the old
// implementation for backwards compat.
//
// Note: we can't use systemd to set up the initial limits, and then change the cgroup
@@ -370,22 +398,14 @@ func getUnitName(c *configs.Cgroup) string {
// This happens at least for v208 when any sibling unit is started.
func joinDevices(c *configs.Cgroup, pid int) error {
path, err := join(c, "devices", pid)
+	// Even if it's a `not found` error, we'll return err because devices cgroup
+ // is hard requirement for container security.
if err != nil {
return err
}
devices := subsystems["devices"]
- if err := devices.Set(path, c); err != nil {
- return err
- }
-
- return nil
-}
-
-// Symmetrical public function to update device based cgroups. Also available
-// in the fs implementation.
-func ApplyDevices(c *configs.Cgroup, pid int) error {
- return joinDevices(c, pid)
+ return devices.Set(path, c)
}
func joinMemory(c *configs.Cgroup, pid int) error {
@@ -397,11 +417,11 @@ func joinMemory(c *configs.Cgroup, pid int) error {
}
path, err := getSubsystemPath(c, "memory")
- if err != nil {
+ if err != nil && !cgroups.IsNotFound(err) {
return err
}
- return ioutil.WriteFile(filepath.Join(path, "memory.memsw.limit_in_bytes"), []byte(strconv.FormatInt(memorySwap, 10)), 0700)
+ return writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(memorySwap, 10))
}
// systemd does not atm set up the cpuset controller, so we must manually
@@ -409,7 +429,7 @@ func joinMemory(c *configs.Cgroup, pid int) error {
// level must have a full setup as the default for a new directory is "no cpus"
func joinCpuset(c *configs.Cgroup, pid int) error {
path, err := getSubsystemPath(c, "cpuset")
- if err != nil {
+ if err != nil && !cgroups.IsNotFound(err) {
return err
}
@@ -417,3 +437,40 @@ func joinCpuset(c *configs.Cgroup, pid int) error {
return s.ApplyDir(path, c, pid)
}
+
+// `BlockIODeviceWeight` property of systemd does not work properly, and systemd
+// expects device path instead of major minor numbers, which is also confusing
+// for users. So we use fs work around for now.
+func joinBlkio(c *configs.Cgroup, pid int) error {
+ path, err := getSubsystemPath(c, "blkio")
+ if err != nil {
+ return err
+ }
+ if c.BlkioWeightDevice != "" {
+ if err := writeFile(path, "blkio.weight_device", c.BlkioWeightDevice); err != nil {
+ return err
+ }
+ }
+ if c.BlkioThrottleReadBpsDevice != "" {
+ if err := writeFile(path, "blkio.throttle.read_bps_device", c.BlkioThrottleReadBpsDevice); err != nil {
+ return err
+ }
+ }
+ if c.BlkioThrottleWriteBpsDevice != "" {
+ if err := writeFile(path, "blkio.throttle.write_bps_device", c.BlkioThrottleWriteBpsDevice); err != nil {
+ return err
+ }
+ }
+ if c.BlkioThrottleReadIOpsDevice != "" {
+ if err := writeFile(path, "blkio.throttle.read_iops_device", c.BlkioThrottleReadIOpsDevice); err != nil {
+ return err
+ }
+ }
+ if c.BlkioThrottleWriteIOpsDevice != "" {
+ if err := writeFile(path, "blkio.throttle.write_iops_device", c.BlkioThrottleWriteIOpsDevice); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/src/github.com/docker/libcontainer/configs/cgroup.go b/vendor/src/github.com/docker/libcontainer/configs/cgroup.go
index 8bf174c195..8a161fcff6 100644
--- a/vendor/src/github.com/docker/libcontainer/configs/cgroup.go
+++ b/vendor/src/github.com/docker/libcontainer/configs/cgroup.go
@@ -19,6 +19,8 @@ type Cgroup struct {
AllowedDevices []*Device `json:"allowed_devices"`
+ DeniedDevices []*Device `json:"denied_devices"`
+
// Memory limit (in bytes)
Memory int64 `json:"memory"`
@@ -43,9 +45,24 @@ type Cgroup struct {
// MEM to use
CpusetMems string `json:"cpuset_mems"`
+ // IO read rate limit per cgroup per device, bytes per second.
+ BlkioThrottleReadBpsDevice string `json:"blkio_throttle_read_bps_device"`
+
+	// IO write rate limit per cgroup per device, bytes per second.
+ BlkioThrottleWriteBpsDevice string `json:"blkio_throttle_write_bps_device"`
+
+ // IO read rate limit per cgroup per device, IO per second.
+ BlkioThrottleReadIOpsDevice string `json:"blkio_throttle_read_iops_device"`
+
+ // IO write rate limit per cgroup per device, IO per second.
+ BlkioThrottleWriteIOpsDevice string `json:"blkio_throttle_write_iops_device"`
+
// Specifies per cgroup weight, range is from 10 to 1000.
BlkioWeight int64 `json:"blkio_weight"`
+ // Weight per cgroup per device, can override BlkioWeight.
+ BlkioWeightDevice string `json:"blkio_weight_device"`
+
// set the freeze value for the process
Freezer FreezerState `json:"freezer"`
diff --git a/vendor/src/github.com/docker/libcontainer/configs/config.go b/vendor/src/github.com/docker/libcontainer/configs/config.go
index b07f252b5e..2c311a0cdf 100644
--- a/vendor/src/github.com/docker/libcontainer/configs/config.go
+++ b/vendor/src/github.com/docker/libcontainer/configs/config.go
@@ -37,6 +37,9 @@ type Config struct {
// bind mounts are writtable.
Readonlyfs bool `json:"readonlyfs"`
+	// Privatefs will mount the container's rootfs as private where mount points from the parent will not propagate
+ Privatefs bool `json:"privatefs"`
+
// Mounts specify additional source and destination paths that will be mounted inside the container's
// rootfs and mount namespace if specified
Mounts []*Mount `json:"mounts"`
@@ -96,6 +99,10 @@ type Config struct {
// ReadonlyPaths specifies paths within the container's rootfs to remount as read-only
// so that these files prevent any writes.
ReadonlyPaths []string `json:"readonly_paths"`
+
+ // SystemProperties is a map of properties and their values. It is the equivalent of using
+ // sysctl -w my.property.name value in Linux.
+ SystemProperties map[string]string `json:"system_properties"`
}
// Gets the root uid for the process on host which could be non-zero
diff --git a/vendor/src/github.com/docker/libcontainer/configs/mount.go b/vendor/src/github.com/docker/libcontainer/configs/mount.go
index 7b3dea3312..5a69f815e4 100644
--- a/vendor/src/github.com/docker/libcontainer/configs/mount.go
+++ b/vendor/src/github.com/docker/libcontainer/configs/mount.go
@@ -18,4 +18,17 @@ type Mount struct {
// Relabel source if set, "z" indicates shared, "Z" indicates unshared.
Relabel string `json:"relabel"`
+
+ // Optional Command to be run before Source is mounted.
+ PremountCmds []Command `json:"premount_cmds"`
+
+ // Optional Command to be run after Source is mounted.
+ PostmountCmds []Command `json:"postmount_cmds"`
+}
+
+type Command struct {
+ Path string `json:"path"`
+ Args []string `json:"args"`
+ Env []string `json:"env"`
+ Dir string `json:"dir"`
}
diff --git a/vendor/src/github.com/docker/libcontainer/configs/namespaces.go b/vendor/src/github.com/docker/libcontainer/configs/namespaces.go
index ac6a7fa2cd..2c2a9fd20a 100644
--- a/vendor/src/github.com/docker/libcontainer/configs/namespaces.go
+++ b/vendor/src/github.com/docker/libcontainer/configs/namespaces.go
@@ -1,9 +1,6 @@
package configs
-import (
- "fmt"
- "syscall"
-)
+import "fmt"
type NamespaceType string
@@ -34,10 +31,6 @@ type Namespace struct {
Path string `json:"path"`
}
-func (n *Namespace) Syscall() int {
- return namespaceInfo[n.Type]
-}
-
func (n *Namespace) GetPath(pid int) string {
if n.Path != "" {
return n.Path
@@ -96,25 +89,3 @@ func (n *Namespaces) index(t NamespaceType) int {
func (n *Namespaces) Contains(t NamespaceType) bool {
return n.index(t) != -1
}
-
-var namespaceInfo = map[NamespaceType]int{
- NEWNET: syscall.CLONE_NEWNET,
- NEWNS: syscall.CLONE_NEWNS,
- NEWUSER: syscall.CLONE_NEWUSER,
- NEWIPC: syscall.CLONE_NEWIPC,
- NEWUTS: syscall.CLONE_NEWUTS,
- NEWPID: syscall.CLONE_NEWPID,
-}
-
-// CloneFlags parses the container's Namespaces options to set the correct
-// flags on clone, unshare. This functions returns flags only for new namespaces.
-func (n *Namespaces) CloneFlags() uintptr {
- var flag int
- for _, v := range *n {
- if v.Path != "" {
- continue
- }
- flag |= namespaceInfo[v.Type]
- }
- return uintptr(flag)
-}
diff --git a/vendor/src/github.com/docker/libcontainer/configs/namespaces_syscall.go b/vendor/src/github.com/docker/libcontainer/configs/namespaces_syscall.go
new file mode 100644
index 0000000000..c962999efd
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/configs/namespaces_syscall.go
@@ -0,0 +1,31 @@
+// +build linux
+
+package configs
+
+import "syscall"
+
+func (n *Namespace) Syscall() int {
+ return namespaceInfo[n.Type]
+}
+
+var namespaceInfo = map[NamespaceType]int{
+ NEWNET: syscall.CLONE_NEWNET,
+ NEWNS: syscall.CLONE_NEWNS,
+ NEWUSER: syscall.CLONE_NEWUSER,
+ NEWIPC: syscall.CLONE_NEWIPC,
+ NEWUTS: syscall.CLONE_NEWUTS,
+ NEWPID: syscall.CLONE_NEWPID,
+}
+
+// CloneFlags parses the container's Namespaces options to set the correct
+// flags on clone, unshare. This functions returns flags only for new namespaces.
+func (n *Namespaces) CloneFlags() uintptr {
+ var flag int
+ for _, v := range *n {
+ if v.Path != "" {
+ continue
+ }
+ flag |= namespaceInfo[v.Type]
+ }
+ return uintptr(flag)
+}
diff --git a/vendor/src/github.com/docker/libcontainer/configs/namespaces_syscall_unsupported.go b/vendor/src/github.com/docker/libcontainer/configs/namespaces_syscall_unsupported.go
new file mode 100644
index 0000000000..1bd26bd6e6
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/configs/namespaces_syscall_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux
+
+package configs
+
+func (n *Namespace) Syscall() int {
+ panic("No namespace syscall support")
+ return 0
+}
+
+// CloneFlags parses the container's Namespaces options to set the correct
+// flags on clone, unshare. This functions returns flags only for new namespaces.
+func (n *Namespaces) CloneFlags() uintptr {
+ panic("No namespace syscall support")
+ return uintptr(0)
+}
diff --git a/vendor/src/github.com/docker/libcontainer/configs/network.go b/vendor/src/github.com/docker/libcontainer/configs/network.go
index 9d5ed7a65f..ccdb228e14 100644
--- a/vendor/src/github.com/docker/libcontainer/configs/network.go
+++ b/vendor/src/github.com/docker/libcontainer/configs/network.go
@@ -2,7 +2,7 @@ package configs
// Network defines configuration for a container's networking stack
//
-// The network configuration can be omited from a container causing the
+// The network configuration can be omitted from a container causing the
// container to be setup with the host's networking stack
type Network struct {
// Type sets the networks type, commonly veth and loopback
@@ -53,7 +53,7 @@ type Network struct {
// Routes can be specified to create entries in the route table as the container is started
//
// All of destination, source, and gateway should be either IPv4 or IPv6.
-// One of the three options must be present, and ommitted entries will use their
+// One of the three options must be present, and omitted entries will use their
// IP family default for the route table. For IPv4 for example, setting the
// gateway to 1.2.3.4 and the interface to eth0 will set up a standard
// destination of 0.0.0.0(or *) when viewed in the route table.
diff --git a/vendor/src/github.com/docker/libcontainer/console_linux.go b/vendor/src/github.com/docker/libcontainer/console_linux.go
index afdc2976c4..a3a0551cf6 100644
--- a/vendor/src/github.com/docker/libcontainer/console_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/console_linux.go
@@ -38,7 +38,7 @@ func newConsole(uid, gid int) (Console, error) {
}, nil
}
-// newConsoleFromPath is an internal fucntion returning an initialzied console for use inside
+// newConsoleFromPath is an internal function returning an initialized console for use inside
// a container's MNT namespace.
func newConsoleFromPath(slavePath string) *linuxConsole {
return &linuxConsole{
diff --git a/vendor/src/github.com/docker/libcontainer/container.go b/vendor/src/github.com/docker/libcontainer/container.go
index 35bdfd781f..a38df8269d 100644
--- a/vendor/src/github.com/docker/libcontainer/container.go
+++ b/vendor/src/github.com/docker/libcontainer/container.go
@@ -67,7 +67,7 @@ type Container interface {
// State returns the current container's state information.
//
// errors:
- // Systemerror - System erroor.
+ // Systemerror - System error.
State() (*State, error)
// Returns the current config of the container.
diff --git a/vendor/src/github.com/docker/libcontainer/container_linux.go b/vendor/src/github.com/docker/libcontainer/container_linux.go
index d52610f073..8a7728a9e0 100644
--- a/vendor/src/github.com/docker/libcontainer/container_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/container_linux.go
@@ -11,11 +11,13 @@ import (
"sync"
"syscall"
- log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus"
"github.com/docker/libcontainer/cgroups"
"github.com/docker/libcontainer/configs"
)
+const stdioFdCount = 3
+
type linuxContainer struct {
id string
root string
@@ -100,7 +102,7 @@ func (c *linuxContainer) Start(process *Process) error {
if err := parent.start(); err != nil {
// terminate the process to ensure that it properly is reaped.
if err := parent.terminate(); err != nil {
- log.Warn(err)
+ logrus.Warn(err)
}
return newSystemError(err)
}
@@ -139,7 +141,8 @@ func (c *linuxContainer) commandTemplate(p *Process, childPipe *os.File) (*exec.
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = &syscall.SysProcAttr{}
}
- cmd.ExtraFiles = []*os.File{childPipe}
+ cmd.ExtraFiles = append(p.ExtraFiles, childPipe)
+ cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBCONTAINER_INITPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1))
// NOTE: when running a container with no PID namespace and the parent process spawning the container is
// PID1 the pdeathsig is being delivered to the container's init process by the kernel for some reason
// even with the parent still running.
@@ -178,11 +181,9 @@ func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe,
fmt.Sprintf("_LIBCONTAINER_INITPID=%d", c.initProcess.pid()),
"_LIBCONTAINER_INITTYPE=setns",
)
-
if p.consolePath != "" {
cmd.Env = append(cmd.Env, "_LIBCONTAINER_CONSOLE_PATH="+p.consolePath)
}
-
// TODO: set on container for process management
return &setnsProcess{
cmd: cmd,
@@ -195,13 +196,14 @@ func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe,
func (c *linuxContainer) newInitConfig(process *Process) *initConfig {
return &initConfig{
- Config: c.config,
- Args: process.Args,
- Env: process.Env,
- User: process.User,
- Cwd: process.Cwd,
- Console: process.consolePath,
- Capabilities: process.Capabilities,
+ Config: c.config,
+ Args: process.Args,
+ Env: process.Env,
+ User: process.User,
+ Cwd: process.Cwd,
+ Console: process.consolePath,
+ Capabilities: process.Capabilities,
+ PassedFilesCount: len(process.ExtraFiles),
}
}
@@ -225,7 +227,7 @@ func (c *linuxContainer) Destroy() error {
}
if !c.config.Namespaces.Contains(configs.NEWPID) {
if err := killCgroupProcesses(c.cgroupManager); err != nil {
- log.Warn(err)
+ logrus.Warn(err)
}
}
err = c.cgroupManager.Destroy()
diff --git a/vendor/src/github.com/docker/libcontainer/devices/devices.go b/vendor/src/github.com/docker/libcontainer/devices/devices.go
index 537f71aff1..7a11eaf11b 100644
--- a/vendor/src/github.com/docker/libcontainer/devices/devices.go
+++ b/vendor/src/github.com/docker/libcontainer/devices/devices.go
@@ -21,7 +21,7 @@ var (
ioutilReadDir = ioutil.ReadDir
)
-// Given the path to a device and it's cgroup_permissions(which cannot be easilly queried) look up the information about a linux device and return that information as a Device struct.
+// Given the path to a device and its cgroup_permissions (which cannot be easily queried) look up the information about a linux device and return that information as a Device struct.
func DeviceFromPath(path, permissions string) (*configs.Device, error) {
fileInfo, err := osLstat(path)
if err != nil {
diff --git a/vendor/src/github.com/docker/libcontainer/factory.go b/vendor/src/github.com/docker/libcontainer/factory.go
index 0c9fa63a32..2b3ff85d8f 100644
--- a/vendor/src/github.com/docker/libcontainer/factory.go
+++ b/vendor/src/github.com/docker/libcontainer/factory.go
@@ -32,15 +32,13 @@ type Factory interface {
// System error
Load(id string) (Container, error)
- // StartInitialization is an internal API to libcontainer used during the rexec of the
- // container. pipefd is the fd to the child end of the pipe used to syncronize the
- // parent and child process providing state and configuration to the child process and
- // returning any errors during the init of the container
+ // StartInitialization is an internal API to libcontainer used during the reexec of the
+ // container.
//
// Errors:
- // pipe connection error
- // system error
- StartInitialization(pipefd uintptr) error
+ // Pipe connection error
+ // System error
+ StartInitialization() error
// Type returns info string about factory type (e.g. lxc, libcontainer...)
Type() string
diff --git a/vendor/src/github.com/docker/libcontainer/factory_linux.go b/vendor/src/github.com/docker/libcontainer/factory_linux.go
index a2d3bec780..3cf1c3d25f 100644
--- a/vendor/src/github.com/docker/libcontainer/factory_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/factory_linux.go
@@ -10,6 +10,7 @@ import (
"os/exec"
"path/filepath"
"regexp"
+ "strconv"
"syscall"
"github.com/docker/docker/pkg/mount"
@@ -194,7 +195,11 @@ func (l *LinuxFactory) Type() string {
// StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state
// This is a low level implementation detail of the reexec and should not be consumed externally
-func (l *LinuxFactory) StartInitialization(pipefd uintptr) (err error) {
+func (l *LinuxFactory) StartInitialization() (err error) {
+ pipefd, err := strconv.Atoi(os.Getenv("_LIBCONTAINER_INITPIPE"))
+ if err != nil {
+ return err
+ }
var (
pipe = os.NewFile(uintptr(pipefd), "pipe")
it = initType(os.Getenv("_LIBCONTAINER_INITTYPE"))
diff --git a/vendor/src/github.com/docker/libcontainer/init_linux.go b/vendor/src/github.com/docker/libcontainer/init_linux.go
index 1786b1ed7a..1771fd1930 100644
--- a/vendor/src/github.com/docker/libcontainer/init_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/init_linux.go
@@ -9,7 +9,7 @@ import (
"strings"
"syscall"
- log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus"
"github.com/docker/libcontainer/cgroups"
"github.com/docker/libcontainer/configs"
"github.com/docker/libcontainer/netlink"
@@ -40,14 +40,15 @@ type network struct {
// initConfig is used for transferring parameters from Exec() to Init()
type initConfig struct {
- Args []string `json:"args"`
- Env []string `json:"env"`
- Cwd string `json:"cwd"`
- Capabilities []string `json:"capabilities"`
- User string `json:"user"`
- Config *configs.Config `json:"config"`
- Console string `json:"console"`
- Networks []*network `json:"network"`
+ Args []string `json:"args"`
+ Env []string `json:"env"`
+ Cwd string `json:"cwd"`
+ Capabilities []string `json:"capabilities"`
+ User string `json:"user"`
+ Config *configs.Config `json:"config"`
+ Console string `json:"console"`
+ Networks []*network `json:"network"`
+ PassedFilesCount int `json:"passed_files_count"`
}
type initer interface {
@@ -95,10 +96,10 @@ func populateProcessEnvironment(env []string) error {
// and working dir, and closes any leaked file descriptors
// before executing the command inside the namespace
func finalizeNamespace(config *initConfig) error {
- // Ensure that all non-standard fds we may have accidentally
+ // Ensure that all unwanted fds we may have accidentally
// inherited are marked close-on-exec so they stay out of the
// container
- if err := utils.CloseExecFrom(3); err != nil {
+ if err := utils.CloseExecFrom(config.PassedFilesCount + 3); err != nil {
return err
}
@@ -233,7 +234,7 @@ func setupRlimits(config *configs.Config) error {
func killCgroupProcesses(m cgroups.Manager) error {
var procs []*os.Process
if err := m.Freeze(configs.Frozen); err != nil {
- log.Warn(err)
+ logrus.Warn(err)
}
pids, err := m.GetPids()
if err != nil {
@@ -244,16 +245,16 @@ func killCgroupProcesses(m cgroups.Manager) error {
if p, err := os.FindProcess(pid); err == nil {
procs = append(procs, p)
if err := p.Kill(); err != nil {
- log.Warn(err)
+ logrus.Warn(err)
}
}
}
if err := m.Freeze(configs.Thawed); err != nil {
- log.Warn(err)
+ logrus.Warn(err)
}
for _, p := range procs {
if _, err := p.Wait(); err != nil {
- log.Warn(err)
+ logrus.Warn(err)
}
}
return nil
diff --git a/vendor/src/github.com/docker/libcontainer/integration/exec_test.go b/vendor/src/github.com/docker/libcontainer/integration/exec_test.go
index 12457ba1a2..fea5f7ee0a 100644
--- a/vendor/src/github.com/docker/libcontainer/integration/exec_test.go
+++ b/vendor/src/github.com/docker/libcontainer/integration/exec_test.go
@@ -4,8 +4,10 @@ import (
"bytes"
"io/ioutil"
"os"
+ "path/filepath"
"strconv"
"strings"
+ "syscall"
"testing"
"github.com/docker/libcontainer"
@@ -29,9 +31,7 @@ func testExecPS(t *testing.T, userns bool) {
return
}
rootfs, err := newRootfs()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
if userns {
@@ -64,21 +64,15 @@ func TestIPCPrivate(t *testing.T) {
}
rootfs, err := newRootfs()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer remove(rootfs)
l, err := os.Readlink("/proc/1/ns/ipc")
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
config := newTemplateConfig(rootfs)
buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc")
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
if exitCode != 0 {
t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr)
@@ -95,22 +89,16 @@ func TestIPCHost(t *testing.T) {
}
rootfs, err := newRootfs()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer remove(rootfs)
l, err := os.Readlink("/proc/1/ns/ipc")
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
config := newTemplateConfig(rootfs)
config.Namespaces.Remove(configs.NEWIPC)
buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc")
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
if exitCode != 0 {
t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr)
@@ -127,23 +115,17 @@ func TestIPCJoinPath(t *testing.T) {
}
rootfs, err := newRootfs()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer remove(rootfs)
l, err := os.Readlink("/proc/1/ns/ipc")
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
config := newTemplateConfig(rootfs)
config.Namespaces.Add(configs.NEWIPC, "/proc/1/ns/ipc")
buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc")
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
if exitCode != 0 {
t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr)
@@ -160,9 +142,7 @@ func TestIPCBadPath(t *testing.T) {
}
rootfs, err := newRootfs()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
@@ -180,16 +160,12 @@ func TestRlimit(t *testing.T) {
}
rootfs, err := newRootfs()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
out, _, err := runContainer(config, "", "/bin/sh", "-c", "ulimit -n")
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
if limit := strings.TrimSpace(out.Stdout.String()); limit != "1025" {
t.Fatalf("expected rlimit to be 1025, got %s", limit)
}
@@ -208,9 +184,7 @@ func newTestRoot() (string, error) {
func waitProcess(p *libcontainer.Process, t *testing.T) {
status, err := p.Wait()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
if !status.Success() {
t.Fatal(status)
}
@@ -221,35 +195,22 @@ func TestEnter(t *testing.T) {
return
}
root, err := newTestRoot()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer os.RemoveAll(root)
rootfs, err := newRootfs()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
- factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
- if err != nil {
- t.Fatal(err)
- }
-
container, err := factory.Create("test", config)
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
var stdout, stdout2 bytes.Buffer
@@ -262,19 +223,13 @@ func TestEnter(t *testing.T) {
err = container.Start(&pconfig)
stdinR.Close()
defer stdinW.Close()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
pid, err := pconfig.Pid()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
// Execute another process in the container
stdinR2, stdinW2, err := os.Pipe()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
pconfig2 := libcontainer.Process{
Env: standardEnvironment,
}
@@ -285,19 +240,13 @@ func TestEnter(t *testing.T) {
err = container.Start(&pconfig2)
stdinR2.Close()
defer stdinW2.Close()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
pid2, err := pconfig2.Pid()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
processes, err := container.Processes()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
n := 0
for i := range processes {
@@ -318,14 +267,10 @@ func TestEnter(t *testing.T) {
// Check that both processes live in the same pidns
pidns := string(stdout.Bytes())
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
pidns2 := string(stdout2.Bytes())
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
if pidns != pidns2 {
t.Fatal("The second process isn't in the required pid namespace", pidns, pidns2)
@@ -337,28 +282,17 @@ func TestProcessEnv(t *testing.T) {
return
}
root, err := newTestRoot()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer os.RemoveAll(root)
rootfs, err := newRootfs()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
- factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
- if err != nil {
- t.Fatal(err)
- }
-
container, err := factory.Create("test", config)
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer container.Destroy()
var stdout bytes.Buffer
@@ -374,17 +308,12 @@ func TestProcessEnv(t *testing.T) {
Stdout: &stdout,
}
err = container.Start(&pconfig)
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
// Wait for process
waitProcess(&pconfig, t)
outputEnv := string(stdout.Bytes())
- if err != nil {
- t.Fatal(err)
- }
// Check that the environment has the key/value pair we added
if !strings.Contains(outputEnv, "FOO=BAR") {
@@ -402,28 +331,17 @@ func TestProcessCaps(t *testing.T) {
return
}
root, err := newTestRoot()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer os.RemoveAll(root)
rootfs, err := newRootfs()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
- factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
- if err != nil {
- t.Fatal(err)
- }
-
container, err := factory.Create("test", config)
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer container.Destroy()
processCaps := append(config.Capabilities, "NET_ADMIN")
@@ -437,17 +355,12 @@ func TestProcessCaps(t *testing.T) {
Stdout: &stdout,
}
err = container.Start(&pconfig)
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
// Wait for process
waitProcess(&pconfig, t)
outputStatus := string(stdout.Bytes())
- if err != nil {
- t.Fatal(err)
- }
lines := strings.Split(outputStatus, "\n")
@@ -497,37 +410,25 @@ func testFreeze(t *testing.T, systemd bool) {
return
}
root, err := newTestRoot()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer os.RemoveAll(root)
rootfs, err := newRootfs()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
+ f := factory
if systemd {
- config.Cgroups.Slice = "system.slice"
+ f = systemdFactory
}
- factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
- if err != nil {
- t.Fatal(err)
- }
-
- container, err := factory.Create("test", config)
- if err != nil {
- t.Fatal(err)
- }
+ container, err := f.Create("test", config)
+ ok(t, err)
defer container.Destroy()
stdinR, stdinW, err := os.Pipe()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
pconfig := libcontainer.Process{
Args: []string{"cat"},
@@ -537,44 +438,64 @@ func testFreeze(t *testing.T, systemd bool) {
err = container.Start(&pconfig)
stdinR.Close()
defer stdinW.Close()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
pid, err := pconfig.Pid()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
process, err := os.FindProcess(pid)
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
- if err := container.Pause(); err != nil {
- t.Fatal(err)
- }
+ err = container.Pause()
+ ok(t, err)
state, err := container.Status()
- if err != nil {
- t.Fatal(err)
- }
- if err := container.Resume(); err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
+ err = container.Resume()
+ ok(t, err)
if state != libcontainer.Paused {
t.Fatal("Unexpected state: ", state)
}
stdinW.Close()
s, err := process.Wait()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
+
if !s.Success() {
t.Fatal(s.String())
}
}
+func TestCpuShares(t *testing.T) {
+ testCpuShares(t, false)
+}
+
+func TestSystemdCpuShares(t *testing.T) {
+ if !systemd.UseSystemd() {
+ t.Skip("Systemd is unsupported")
+ }
+ testCpuShares(t, true)
+}
+
+func testCpuShares(t *testing.T, systemd bool) {
+ if testing.Short() {
+ return
+ }
+ rootfs, err := newRootfs()
+ ok(t, err)
+ defer remove(rootfs)
+
+ config := newTemplateConfig(rootfs)
+ if systemd {
+ config.Cgroups.Slice = "system.slice"
+ }
+ config.Cgroups.CpuShares = 1
+
+ _, _, err = runContainer(config, "", "ps")
+ if err == nil {
+ t.Fatalf("runContainer should failed with invalid CpuShares")
+ }
+}
+
func TestContainerState(t *testing.T) {
if testing.Short() {
return
@@ -606,11 +527,6 @@ func TestContainerState(t *testing.T) {
{Type: configs.NEWNET},
})
- factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
- if err != nil {
- t.Fatal(err)
- }
-
container, err := factory.Create("test", config)
if err != nil {
t.Fatal(err)
@@ -648,3 +564,172 @@ func TestContainerState(t *testing.T) {
stdinW.Close()
p.Wait()
}
+
+func TestPassExtraFiles(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+
+ rootfs, err := newRootfs()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer remove(rootfs)
+
+ config := newTemplateConfig(rootfs)
+
+ container, err := factory.Create("test", config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer container.Destroy()
+
+ var stdout bytes.Buffer
+ pipeout1, pipein1, err := os.Pipe()
+ pipeout2, pipein2, err := os.Pipe()
+ process := libcontainer.Process{
+ Args: []string{"sh", "-c", "cd /proc/$$/fd; echo -n *; echo -n 1 >3; echo -n 2 >4"},
+ Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
+ ExtraFiles: []*os.File{pipein1, pipein2},
+ Stdin: nil,
+ Stdout: &stdout,
+ }
+ err = container.Start(&process)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ waitProcess(&process, t)
+
+ out := string(stdout.Bytes())
+ // fd 5 is the directory handle for /proc/$$/fd
+ if out != "0 1 2 3 4 5" {
+ t.Fatalf("expected to have the file descriptors '0 1 2 3 4 5' passed to init, got '%s'", out)
+ }
+ var buf = []byte{0}
+ _, err = pipeout1.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ out1 := string(buf)
+ if out1 != "1" {
+ t.Fatalf("expected first pipe to receive '1', got '%s'", out1)
+ }
+
+ _, err = pipeout2.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ out2 := string(buf)
+ if out2 != "2" {
+ t.Fatalf("expected second pipe to receive '2', got '%s'", out2)
+ }
+}
+
+func TestMountCmds(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+ root, err := newTestRoot()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(root)
+
+ rootfs, err := newRootfs()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer remove(rootfs)
+
+ tmpDir, err := ioutil.TempDir("", "tmpdir")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ config := newTemplateConfig(rootfs)
+ config.Mounts = append(config.Mounts, &configs.Mount{
+ Source: tmpDir,
+ Destination: "/tmp",
+ Device: "bind",
+ Flags: syscall.MS_BIND | syscall.MS_REC,
+ PremountCmds: []configs.Command{
+ {Path: "touch", Args: []string{filepath.Join(tmpDir, "hello")}},
+ {Path: "touch", Args: []string{filepath.Join(tmpDir, "world")}},
+ },
+ PostmountCmds: []configs.Command{
+ {Path: "cp", Args: []string{filepath.Join(rootfs, "tmp", "hello"), filepath.Join(rootfs, "tmp", "hello-backup")}},
+ {Path: "cp", Args: []string{filepath.Join(rootfs, "tmp", "world"), filepath.Join(rootfs, "tmp", "world-backup")}},
+ },
+ })
+
+ container, err := factory.Create("test", config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer container.Destroy()
+
+ pconfig := libcontainer.Process{
+ Args: []string{"sh", "-c", "env"},
+ Env: standardEnvironment,
+ }
+ err = container.Start(&pconfig)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Wait for process
+ waitProcess(&pconfig, t)
+
+ entries, err := ioutil.ReadDir(tmpDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := []string{"hello", "hello-backup", "world", "world-backup"}
+ for i, e := range entries {
+ if e.Name() != expected[i] {
+ t.Errorf("Got(%s), expect %s", e.Name(), expected[i])
+ }
+ }
+}
+
+func TestSystemProperties(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+ root, err := newTestRoot()
+ ok(t, err)
+ defer os.RemoveAll(root)
+
+ rootfs, err := newRootfs()
+ ok(t, err)
+ defer remove(rootfs)
+
+ config := newTemplateConfig(rootfs)
+ config.SystemProperties = map[string]string{
+ "kernel.shmmni": "8192",
+ }
+
+ container, err := factory.Create("test", config)
+ ok(t, err)
+ defer container.Destroy()
+
+ var stdout bytes.Buffer
+ pconfig := libcontainer.Process{
+ Args: []string{"sh", "-c", "cat /proc/sys/kernel/shmmni"},
+ Env: standardEnvironment,
+ Stdin: nil,
+ Stdout: &stdout,
+ }
+ err = container.Start(&pconfig)
+ ok(t, err)
+
+ // Wait for process
+ waitProcess(&pconfig, t)
+
+ shmmniOutput := strings.TrimSpace(string(stdout.Bytes()))
+ if shmmniOutput != "8192" {
+ t.Fatalf("kernel.shmmni property expected to be 8192, but is %s", shmmniOutput)
+ }
+}
diff --git a/vendor/src/github.com/docker/libcontainer/integration/execin_test.go b/vendor/src/github.com/docker/libcontainer/integration/execin_test.go
index 252e6e415e..f81faf010a 100644
--- a/vendor/src/github.com/docker/libcontainer/integration/execin_test.go
+++ b/vendor/src/github.com/docker/libcontainer/integration/execin_test.go
@@ -16,22 +16,16 @@ func TestExecIn(t *testing.T) {
return
}
rootfs, err := newRootfs()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
process := &libcontainer.Process{
Args: []string{"cat"},
Env: standardEnvironment,
@@ -40,9 +34,7 @@ func TestExecIn(t *testing.T) {
err = container.Start(process)
stdinR.Close()
defer stdinW.Close()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
buffers := newStdBuffers()
ps := &libcontainer.Process{
@@ -53,12 +45,9 @@ func TestExecIn(t *testing.T) {
Stderr: buffers.Stderr,
}
err = container.Start(ps)
- if err != nil {
- t.Fatal(err)
- }
- if _, err := ps.Wait(); err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
+ _, err = ps.Wait()
+ ok(t, err)
stdinW.Close()
if _, err := process.Wait(); err != nil {
t.Log(err)
@@ -74,21 +63,15 @@ func TestExecInRlimit(t *testing.T) {
return
}
rootfs, err := newRootfs()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer container.Destroy()
stdinR, stdinW, err := os.Pipe()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
process := &libcontainer.Process{
Args: []string{"cat"},
Env: standardEnvironment,
@@ -97,9 +80,7 @@ func TestExecInRlimit(t *testing.T) {
err = container.Start(process)
stdinR.Close()
defer stdinW.Close()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
buffers := newStdBuffers()
ps := &libcontainer.Process{
@@ -110,12 +91,9 @@ func TestExecInRlimit(t *testing.T) {
Stderr: buffers.Stderr,
}
err = container.Start(ps)
- if err != nil {
- t.Fatal(err)
- }
- if _, err := ps.Wait(); err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
+ _, err = ps.Wait()
+ ok(t, err)
stdinW.Close()
if _, err := process.Wait(); err != nil {
t.Log(err)
@@ -131,22 +109,16 @@ func TestExecInError(t *testing.T) {
return
}
rootfs, err := newRootfs()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
process := &libcontainer.Process{
Args: []string{"cat"},
Env: standardEnvironment,
@@ -160,9 +132,7 @@ func TestExecInError(t *testing.T) {
t.Log(err)
}
}()
- if err != nil {
- t.Fatal(err)
- }
+ ok(t, err)
unexistent := &libcontainer.Process{
Args: []string{"unexistent"},
@@ -178,6 +148,121 @@ func TestExecInError(t *testing.T) {
}
func TestExecInTTY(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+ rootfs, err := newRootfs()
+ ok(t, err)
+ defer remove(rootfs)
+ config := newTemplateConfig(rootfs)
+ container, err := newContainer(config)
+ ok(t, err)
+ defer container.Destroy()
+
+ // Execute a first process in the container
+ stdinR, stdinW, err := os.Pipe()
+ ok(t, err)
+ process := &libcontainer.Process{
+ Args: []string{"cat"},
+ Env: standardEnvironment,
+ Stdin: stdinR,
+ }
+ err = container.Start(process)
+ stdinR.Close()
+ defer stdinW.Close()
+ ok(t, err)
+
+ var stdout bytes.Buffer
+ ps := &libcontainer.Process{
+ Args: []string{"ps"},
+ Env: standardEnvironment,
+ }
+ console, err := ps.NewConsole(0)
+ copy := make(chan struct{})
+ go func() {
+ io.Copy(&stdout, console)
+ close(copy)
+ }()
+ ok(t, err)
+ err = container.Start(ps)
+ ok(t, err)
+ select {
+ case <-time.After(5 * time.Second):
+ t.Fatal("Waiting for copy timed out")
+ case <-copy:
+ }
+ _, err = ps.Wait()
+ ok(t, err)
+ stdinW.Close()
+ if _, err := process.Wait(); err != nil {
+ t.Log(err)
+ }
+ out := stdout.String()
+ if !strings.Contains(out, "cat") || !strings.Contains(string(out), "ps") {
+ t.Fatalf("unexpected running process, output %q", out)
+ }
+}
+
+func TestExecInEnvironment(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+ rootfs, err := newRootfs()
+ ok(t, err)
+ defer remove(rootfs)
+ config := newTemplateConfig(rootfs)
+ container, err := newContainer(config)
+ ok(t, err)
+ defer container.Destroy()
+
+ // Execute a first process in the container
+ stdinR, stdinW, err := os.Pipe()
+ ok(t, err)
+ process := &libcontainer.Process{
+ Args: []string{"cat"},
+ Env: standardEnvironment,
+ Stdin: stdinR,
+ }
+ err = container.Start(process)
+ stdinR.Close()
+ defer stdinW.Close()
+ ok(t, err)
+
+ buffers := newStdBuffers()
+ process2 := &libcontainer.Process{
+ Args: []string{"env"},
+ Env: []string{
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "DEBUG=true",
+ "DEBUG=false",
+ "ENV=test",
+ },
+ Stdin: buffers.Stdin,
+ Stdout: buffers.Stdout,
+ Stderr: buffers.Stderr,
+ }
+ err = container.Start(process2)
+ ok(t, err)
+ if _, err := process2.Wait(); err != nil {
+ out := buffers.Stdout.String()
+ t.Fatal(err, out)
+ }
+ stdinW.Close()
+ if _, err := process.Wait(); err != nil {
+ t.Log(err)
+ }
+ out := buffers.Stdout.String()
+ // check execin's process environment
+ if !strings.Contains(out, "DEBUG=false") ||
+ !strings.Contains(out, "ENV=test") ||
+ !strings.Contains(out, "HOME=/root") ||
+ !strings.Contains(out, "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin") ||
+ strings.Contains(out, "DEBUG=true") {
+ t.Fatalf("unexpected running process, output %q", out)
+ }
+}
+
+func TestExecinPassExtraFiles(t *testing.T) {
if testing.Short() {
return
}
@@ -211,106 +296,45 @@ func TestExecInTTY(t *testing.T) {
}
var stdout bytes.Buffer
- ps := &libcontainer.Process{
- Args: []string{"ps"},
- Env: standardEnvironment,
+ pipeout1, pipein1, err := os.Pipe()
+ pipeout2, pipein2, err := os.Pipe()
+ inprocess := &libcontainer.Process{
+ Args: []string{"sh", "-c", "cd /proc/$$/fd; echo -n *; echo -n 1 >3; echo -n 2 >4"},
+ Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
+ ExtraFiles: []*os.File{pipein1, pipein2},
+ Stdin: nil,
+ Stdout: &stdout,
}
- console, err := ps.NewConsole(0)
- copy := make(chan struct{})
- go func() {
- io.Copy(&stdout, console)
- close(copy)
- }()
+ err = container.Start(inprocess)
if err != nil {
t.Fatal(err)
}
- err = container.Start(ps)
- if err != nil {
- t.Fatal(err)
- }
- select {
- case <-time.After(5 * time.Second):
- t.Fatal("Waiting for copy timed out")
- case <-copy:
- }
- if _, err := ps.Wait(); err != nil {
- t.Fatal(err)
- }
+
+ waitProcess(inprocess, t)
stdinW.Close()
- if _, err := process.Wait(); err != nil {
- t.Log(err)
+ waitProcess(process, t)
+
+ out := string(stdout.Bytes())
+ // fd 5 is the directory handle for /proc/$$/fd
+ if out != "0 1 2 3 4 5" {
+ t.Fatalf("expected to have the file descriptors '0 1 2 3 4 5' passed to exec, got '%s'", out)
}
- out := stdout.String()
- if !strings.Contains(out, "cat") || !strings.Contains(string(out), "ps") {
- t.Fatalf("unexpected running process, output %q", out)
- }
-}
-
-func TestExecInEnvironment(t *testing.T) {
- if testing.Short() {
- return
- }
- rootfs, err := newRootfs()
- if err != nil {
- t.Fatal(err)
- }
- defer remove(rootfs)
- config := newTemplateConfig(rootfs)
- container, err := newContainer(config)
- if err != nil {
- t.Fatal(err)
- }
- defer container.Destroy()
-
- // Execute a first process in the container
- stdinR, stdinW, err := os.Pipe()
- if err != nil {
- t.Fatal(err)
- }
- process := &libcontainer.Process{
- Args: []string{"cat"},
- Env: standardEnvironment,
- Stdin: stdinR,
- }
- err = container.Start(process)
- stdinR.Close()
- defer stdinW.Close()
- if err != nil {
- t.Fatal(err)
- }
-
- buffers := newStdBuffers()
- process2 := &libcontainer.Process{
- Args: []string{"env"},
- Env: []string{
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
- "DEBUG=true",
- "DEBUG=false",
- "ENV=test",
- },
- Stdin: buffers.Stdin,
- Stdout: buffers.Stdout,
- Stderr: buffers.Stderr,
- }
- err = container.Start(process2)
- if err != nil {
- t.Fatal(err)
- }
- if _, err := process2.Wait(); err != nil {
- out := buffers.Stdout.String()
- t.Fatal(err, out)
- }
- stdinW.Close()
- if _, err := process.Wait(); err != nil {
- t.Log(err)
- }
- out := buffers.Stdout.String()
- // check execin's process environment
- if !strings.Contains(out, "DEBUG=false") ||
- !strings.Contains(out, "ENV=test") ||
- !strings.Contains(out, "HOME=/root") ||
- !strings.Contains(out, "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin") ||
- strings.Contains(out, "DEBUG=true") {
- t.Fatalf("unexpected running process, output %q", out)
+ var buf = []byte{0}
+ _, err = pipeout1.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ out1 := string(buf)
+ if out1 != "1" {
+ t.Fatalf("expected first pipe to receive '1', got '%s'", out1)
+ }
+
+ _, err = pipeout2.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ out2 := string(buf)
+ if out2 != "2" {
+ t.Fatalf("expected second pipe to receive '2', got '%s'", out2)
}
}
diff --git a/vendor/src/github.com/docker/libcontainer/integration/init_test.go b/vendor/src/github.com/docker/libcontainer/integration/init_test.go
index f11834de34..28466036d1 100644
--- a/vendor/src/github.com/docker/libcontainer/integration/init_test.go
+++ b/vendor/src/github.com/docker/libcontainer/integration/init_test.go
@@ -1,11 +1,13 @@
package integration
import (
- "log"
"os"
"runtime"
+ "testing"
+ "github.com/Sirupsen/logrus"
"github.com/docker/libcontainer"
+ "github.com/docker/libcontainer/cgroups/systemd"
_ "github.com/docker/libcontainer/nsenter"
)
@@ -19,9 +21,40 @@ func init() {
runtime.LockOSThread()
factory, err := libcontainer.New("")
if err != nil {
- log.Fatalf("unable to initialize for container: %s", err)
+ logrus.Fatalf("unable to initialize for container: %s", err)
}
- if err := factory.StartInitialization(3); err != nil {
- log.Fatal(err)
+ if err := factory.StartInitialization(); err != nil {
+ logrus.Fatal(err)
}
}
+
+var (
+ factory libcontainer.Factory
+ systemdFactory libcontainer.Factory
+)
+
+func TestMain(m *testing.M) {
+ var (
+ err error
+ ret int = 0
+ )
+
+ logrus.SetOutput(os.Stderr)
+ logrus.SetLevel(logrus.InfoLevel)
+
+ factory, err = libcontainer.New(".", libcontainer.Cgroupfs)
+ if err != nil {
+ logrus.Error(err)
+ os.Exit(1)
+ }
+ if systemd.UseSystemd() {
+ systemdFactory, err = libcontainer.New(".", libcontainer.SystemdCgroups)
+ if err != nil {
+ logrus.Error(err)
+ os.Exit(1)
+ }
+ }
+
+ ret = m.Run()
+ os.Exit(ret)
+}
diff --git a/vendor/src/github.com/docker/libcontainer/integration/utils_test.go b/vendor/src/github.com/docker/libcontainer/integration/utils_test.go
index cf4596864e..ffd7130ba2 100644
--- a/vendor/src/github.com/docker/libcontainer/integration/utils_test.go
+++ b/vendor/src/github.com/docker/libcontainer/integration/utils_test.go
@@ -6,8 +6,11 @@ import (
"io/ioutil"
"os"
"os/exec"
+ "path/filepath"
+ "runtime"
"strings"
"syscall"
+ "testing"
"github.com/docker/libcontainer"
"github.com/docker/libcontainer/configs"
@@ -38,6 +41,14 @@ func (b *stdBuffers) String() string {
return strings.Join(s, "|")
}
+// ok fails the test if an err is not nil.
+func ok(t testing.TB, err error) {
+ if err != nil {
+ _, file, line, _ := runtime.Caller(1)
+ t.Fatalf("%s:%d: unexpected error: %s\n\n", filepath.Base(file), line, err.Error())
+ }
+}
+
// newRootfs creates a new tmp directory and copies the busybox root filesystem
func newRootfs() (string, error) {
dir, err := ioutil.TempDir("", "")
@@ -68,19 +79,13 @@ func copyBusybox(dest string) error {
}
func newContainer(config *configs.Config) (libcontainer.Container, error) {
- cgm := libcontainer.Cgroupfs
+ f := factory
+
if config.Cgroups != nil && config.Cgroups.Slice == "system.slice" {
- cgm = libcontainer.SystemdCgroups
+ f = systemdFactory
}
- factory, err := libcontainer.New(".",
- libcontainer.InitArgs(os.Args[0], "init", "--"),
- cgm,
- )
- if err != nil {
- return nil, err
- }
- return factory.Create("testCT", config)
+ return f.Create("testCT", config)
}
// runContainer runs the container with the specific config and arguments
diff --git a/vendor/src/github.com/docker/libcontainer/label/label_selinux.go b/vendor/src/github.com/docker/libcontainer/label/label_selinux.go
index 5983031ae0..7bc40ddde2 100644
--- a/vendor/src/github.com/docker/libcontainer/label/label_selinux.go
+++ b/vendor/src/github.com/docker/libcontainer/label/label_selinux.go
@@ -101,10 +101,22 @@ func SetFileCreateLabel(fileLabel string) error {
// the MCS label should continue to be used. SELinux will use this field
// to make sure the content can not be shared by other containes.
func Relabel(path string, fileLabel string, relabel string) error {
+ exclude_path := []string{"/", "/usr", "/etc"}
if fileLabel == "" {
return nil
}
- if relabel == "z" {
+ for _, p := range exclude_path {
+ if path == p {
+ return fmt.Errorf("Relabeling of %s is not allowed", path)
+ }
+ }
+ if !strings.ContainsAny(relabel, "zZ") {
+ return nil
+ }
+ if strings.Contains(relabel, "z") && strings.Contains(relabel, "Z") {
+ return fmt.Errorf("Bad SELinux option z and Z can not be used together")
+ }
+ if strings.Contains(relabel, "z") {
c := selinux.NewContext(fileLabel)
c["level"] = "s0"
fileLabel = c.Get()
diff --git a/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go b/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go
index 8629353f24..6ab0c67ca6 100644
--- a/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go
+++ b/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go
@@ -87,3 +87,31 @@ func TestDuplicateLabel(t *testing.T) {
t.Errorf("DisableSecOpt Failed level incorrect")
}
}
+func TestRelabel(t *testing.T) {
+ testdir := "/tmp/test"
+ label := "system_u:system_r:svirt_sandbox_file_t:s0:c1,c2"
+ if err := Relabel(testdir, "", "z"); err != nil {
+ t.Fatal("Relabel with no label failed: %v", err)
+ }
+ if err := Relabel(testdir, label, ""); err != nil {
+ t.Fatal("Relabel with no relabel field failed: %v", err)
+ }
+ if err := Relabel(testdir, label, "z"); err != nil {
+ t.Fatal("Relabel shared failed: %v", err)
+ }
+ if err := Relabel(testdir, label, "Z"); err != nil {
+ t.Fatal("Relabel unshared failed: %v", err)
+ }
+ if err := Relabel(testdir, label, "zZ"); err == nil {
+ t.Fatal("Relabel with shared and unshared succeeded")
+ }
+ if err := Relabel("/etc", label, "zZ"); err == nil {
+ t.Fatal("Relabel /etc succeeded")
+ }
+ if err := Relabel("/", label, ""); err == nil {
+ t.Fatal("Relabel / succeeded")
+ }
+ if err := Relabel("/usr", label, "Z"); err == nil {
+ t.Fatal("Relabel /usr succeeded")
+ }
+}
diff --git a/vendor/src/github.com/docker/libcontainer/nsenter/README.md b/vendor/src/github.com/docker/libcontainer/nsenter/README.md
index ac94cba059..d1a60ef985 100644
--- a/vendor/src/github.com/docker/libcontainer/nsenter/README.md
+++ b/vendor/src/github.com/docker/libcontainer/nsenter/README.md
@@ -1,6 +1,25 @@
## nsenter
-The `nsenter` package registers a special init constructor that is called before the Go runtime has
-a chance to boot. This provides us the ability to `setns` on existing namespaces and avoid the issues
-that the Go runtime has with multiple threads. This constructor is only called if this package is
-registered, imported, in your go application and the argv 0 is `nsenter`.
+The `nsenter` package registers a special init constructor that is called before
+the Go runtime has a chance to boot. This provides us the ability to `setns` on
+existing namespaces and avoid the issues that the Go runtime has with multiple
+threads. This constructor will be called whenever this package is registered
+(imported) in your Go application.
+
+The `nsenter` package will `import "C"` and it uses [cgo](https://golang.org/cmd/cgo/)
+package. In cgo, if the import of "C" is immediately preceded by a comment, that comment,
+called the preamble, is used as a header when compiling the C parts of the package.
+So every time we import package `nsenter`, the C code function `nsexec()` would be
+called. And package `nsenter` is now only imported in Docker execdriver, so every time
+before we call `execdriver.Exec()`, that C code would run.
+
+`nsexec()` will first check the environment variable `_LIBCONTAINER_INITPID`
+which will give the process of the container that should be joined. Namespaces fd will
+be found from `/proc/[pid]/ns` and set by `setns` syscall.
+
+The pipe number is then read from `_LIBCONTAINER_INITPIPE`; error messages can
+be transferred through it. If a tty is requested, `_LIBCONTAINER_CONSOLE_PATH` will
+have a value and a console is opened for output.
+
+Finally, `nsexec()` will clone a child process, exit the parent process and let
+the Go runtime take over.
diff --git a/vendor/src/github.com/docker/libcontainer/nsenter/nsenter_test.go b/vendor/src/github.com/docker/libcontainer/nsenter/nsenter_test.go
index 34e1f52118..db27b8a409 100644
--- a/vendor/src/github.com/docker/libcontainer/nsenter/nsenter_test.go
+++ b/vendor/src/github.com/docker/libcontainer/nsenter/nsenter_test.go
@@ -24,7 +24,7 @@ func TestNsenterAlivePid(t *testing.T) {
Path: os.Args[0],
Args: args,
ExtraFiles: []*os.File{w},
- Env: []string{fmt.Sprintf("_LIBCONTAINER_INITPID=%d", os.Getpid())},
+ Env: []string{fmt.Sprintf("_LIBCONTAINER_INITPID=%d", os.Getpid()), "_LIBCONTAINER_INITPIPE=3"},
}
if err := cmd.Start(); err != nil {
diff --git a/vendor/src/github.com/docker/libcontainer/nsenter/nsexec.c b/vendor/src/github.com/docker/libcontainer/nsenter/nsexec.c
index e7658f3856..d8e45f3cda 100644
--- a/vendor/src/github.com/docker/libcontainer/nsenter/nsexec.c
+++ b/vendor/src/github.com/docker/libcontainer/nsenter/nsexec.c
@@ -66,7 +66,7 @@ void nsexec()
const int num = sizeof(namespaces) / sizeof(char *);
jmp_buf env;
char buf[PATH_MAX], *val;
- int i, tfd, child, len, consolefd = -1;
+ int i, tfd, child, len, pipenum, consolefd = -1;
pid_t pid;
char *console;
@@ -81,6 +81,19 @@ void nsexec()
exit(1);
}
+ val = getenv("_LIBCONTAINER_INITPIPE");
+ if (val == NULL) {
+ pr_perror("Child pipe not found");
+ exit(1);
+ }
+
+ pipenum = atoi(val);
+ snprintf(buf, sizeof(buf), "%d", pipenum);
+ if (strcmp(val, buf)) {
+ pr_perror("Unable to parse _LIBCONTAINER_INITPIPE");
+ exit(1);
+ }
+
console = getenv("_LIBCONTAINER_CONSOLE_PATH");
if (console != NULL) {
consolefd = open(console, O_RDWR);
@@ -124,6 +137,8 @@ void nsexec()
}
if (setjmp(env) == 1) {
+ // Child
+
if (setsid() == -1) {
pr_perror("setsid failed");
exit(1);
@@ -149,7 +164,11 @@ void nsexec()
// Finish executing, let the Go runtime take over.
return;
}
+ // Parent
+ // We must fork to actually enter the PID namespace, use CLONE_PARENT
+ // so the child can have the right parent, and we don't need to forward
+ // the child's exit code or resend its death signal.
child = clone_parent(&env);
if (child < 0) {
pr_perror("Unable to fork");
@@ -158,7 +177,7 @@ void nsexec()
len = snprintf(buf, sizeof(buf), "{ \"pid\" : %d }\n", child);
- if (write(3, buf, len) != len) {
+ if (write(pipenum, buf, len) != len) {
pr_perror("Unable to send a child pid");
kill(child, SIGKILL);
exit(1);
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/README.md b/vendor/src/github.com/docker/libcontainer/nsinit/README.md
index f2e66a866d..98bed0e8e7 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/README.md
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/README.md
@@ -65,3 +65,48 @@ You can identify if a process is running in a container by looking to see if
You may also specify an alternate root directory from where the `container.json`
file is read and where the `state.json` file will be saved.
+
+### How to use?
+
+Currently nsinit has 9 commands. Type `nsinit -h` to list all of them.
+And for every alternative command, you can also use `--help` to get more
+detailed help documents. For example, `nsinit config --help`.
+
+`nsinit` cli application is implemented using [cli.go](https://github.com/codegangsta/cli).
+Lots of details are handled in cli.go, so the implementation of `nsinit` itself
+is very clean and clear.
+
+* **config**
+It will generate a standard configuration file for a container. By default, it
+will generate as the template file in [config.go](https://github.com/docker/libcontainer/blob/master/nsinit/config.go#L192).
+It will modify the template if you have specified some configuration by options.
+* **exec**
+Start a container and execute a new command inside it. Besides common options, it
+has some special options as below.
+ - `--tty,-t`: allocate a TTY to the container.
+ - `--config`: you can specify a configuration file. By default, it will use
+ template configuration.
+ - `--id`: specify the ID for a container. By default, the id is "nsinit".
+ - `--user,-u`: set the user, uid, and/or gid for the process. By default the
+ value is "root".
+ - `--cwd`: set the current working dir.
+ - `--env`: set environment variables for the process.
+* **init**
+It's an internal command that is called inside the container's namespaces to
+initialize the namespace and exec the user's process. It should not be called
+externally.
+* **oom**
+Display OOM notifications for a container; you must specify the container id.
+* **pause**
+Pause the container's processes; you must specify the container id. It uses the
+cgroup freezer subsystem to do this.
+* **unpause**
+Unpause the container's processes. Works the same way as `pause`.
+* **stats**
+Display statistics for the container; it mainly shows cgroup and network
+statistics.
+* **state**
+Get the container's current state. You can also read the state from `state.json`
+in your container_id folder.
+* **help, h**
+Shows a list of commands or help for one command.
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/config.go b/vendor/src/github.com/docker/libcontainer/nsinit/config.go
index e50bb3c11d..1eee9dd929 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/config.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/config.go
@@ -43,6 +43,7 @@ var createFlags = []cli.Flag{
cli.StringFlag{Name: "veth-address", Usage: "veth ip address"},
cli.StringFlag{Name: "veth-gateway", Usage: "veth gateway address"},
cli.IntFlag{Name: "veth-mtu", Usage: "veth mtu"},
+ cli.BoolFlag{Name: "cgroup", Usage: "mount the cgroup data for the container"},
}
var configCommand = cli.Command{
@@ -187,6 +188,12 @@ func modify(config *configs.Config, context *cli.Context) {
}
config.Networks = append(config.Networks, network)
}
+ if context.Bool("cgroup") {
+ config.Mounts = append(config.Mounts, &configs.Mount{
+ Destination: "/sys/fs/cgroup",
+ Device: "cgroup",
+ })
+ }
}
func getTemplate() *configs.Config {
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/exec.go b/vendor/src/github.com/docker/libcontainer/nsinit/exec.go
index 9d302aa31e..cf40a5951a 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/exec.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/exec.go
@@ -23,6 +23,7 @@ var execCommand = cli.Command{
Action: execAction,
Flags: append([]cli.Flag{
cli.BoolFlag{Name: "tty,t", Usage: "allocate a TTY to the container"},
+ cli.BoolFlag{Name: "systemd", Usage: "Use systemd for managing cgroups, if available"},
cli.StringFlag{Name: "id", Value: "nsinit", Usage: "specify the ID for a container"},
cli.StringFlag{Name: "config", Value: "", Usage: "path to the configuration file"},
cli.StringFlag{Name: "user,u", Value: "root", Usage: "set the user, uid, and/or gid for the process"},
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/init.go b/vendor/src/github.com/docker/libcontainer/nsinit/init.go
index 7b2cf1935d..24058d448f 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/init.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/init.go
@@ -3,7 +3,7 @@ package main
import (
"runtime"
- log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/docker/libcontainer"
_ "github.com/docker/libcontainer/nsenter"
@@ -13,14 +13,14 @@ var initCommand = cli.Command{
Name: "init",
Usage: "runs the init process inside the namespace",
Action: func(context *cli.Context) {
- log.SetLevel(log.DebugLevel)
+ logrus.SetLevel(logrus.DebugLevel)
runtime.GOMAXPROCS(1)
runtime.LockOSThread()
factory, err := libcontainer.New("")
if err != nil {
fatal(err)
}
- if err := factory.StartInitialization(3); err != nil {
+ if err := factory.StartInitialization(); err != nil {
fatal(err)
}
panic("This line should never been executed")
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/main.go b/vendor/src/github.com/docker/libcontainer/nsinit/main.go
index eec064c2c4..0a59c9f1eb 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/main.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/main.go
@@ -3,7 +3,7 @@ package main
import (
"os"
- log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
)
@@ -29,18 +29,18 @@ func main() {
}
app.Before = func(context *cli.Context) error {
if context.GlobalBool("debug") {
- log.SetLevel(log.DebugLevel)
+ logrus.SetLevel(logrus.DebugLevel)
}
if path := context.GlobalString("log-file"); path != "" {
f, err := os.Create(path)
if err != nil {
return err
}
- log.SetOutput(f)
+ logrus.SetOutput(f)
}
return nil
}
if err := app.Run(os.Args); err != nil {
- log.Fatal(err)
+ logrus.Fatal(err)
}
}
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/oom.go b/vendor/src/github.com/docker/libcontainer/nsinit/oom.go
index a59b753336..e92c558d86 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/oom.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/oom.go
@@ -1,8 +1,7 @@
package main
import (
- "log"
-
+ "github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
)
@@ -15,16 +14,16 @@ var oomCommand = cli.Command{
Action: func(context *cli.Context) {
container, err := getContainer(context)
if err != nil {
- log.Fatal(err)
+ logrus.Fatal(err)
}
n, err := container.NotifyOOM()
if err != nil {
- log.Fatal(err)
+ logrus.Fatal(err)
}
for x := range n {
// hack for calm down go1.4 gofmt
_ = x
- log.Printf("OOM notification received")
+ logrus.Printf("OOM notification received")
}
},
}
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/pause.go b/vendor/src/github.com/docker/libcontainer/nsinit/pause.go
index 89af0b6f73..7b0cc3269b 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/pause.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/pause.go
@@ -1,8 +1,7 @@
package main
import (
- "log"
-
+ "github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
)
@@ -15,10 +14,10 @@ var pauseCommand = cli.Command{
Action: func(context *cli.Context) {
container, err := getContainer(context)
if err != nil {
- log.Fatal(err)
+ logrus.Fatal(err)
}
if err = container.Pause(); err != nil {
- log.Fatal(err)
+ logrus.Fatal(err)
}
},
}
@@ -32,10 +31,10 @@ var unpauseCommand = cli.Command{
Action: func(context *cli.Context) {
container, err := getContainer(context)
if err != nil {
- log.Fatal(err)
+ logrus.Fatal(err)
}
if err = container.Resume(); err != nil {
- log.Fatal(err)
+ logrus.Fatal(err)
}
},
}
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/utils.go b/vendor/src/github.com/docker/libcontainer/nsinit/utils.go
index 4deca76640..fe9d0efe34 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/utils.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/utils.go
@@ -3,10 +3,12 @@ package main
import (
"encoding/json"
"fmt"
+ "github.com/Sirupsen/logrus"
"os"
"github.com/codegangsta/cli"
"github.com/docker/libcontainer"
+ "github.com/docker/libcontainer/cgroups/systemd"
"github.com/docker/libcontainer/configs"
)
@@ -29,7 +31,15 @@ func loadConfig(context *cli.Context) (*configs.Config, error) {
}
func loadFactory(context *cli.Context) (libcontainer.Factory, error) {
- return libcontainer.New(context.GlobalString("root"), libcontainer.Cgroupfs)
+ cgm := libcontainer.Cgroupfs
+ if context.Bool("systemd") {
+ if systemd.UseSystemd() {
+ cgm = libcontainer.SystemdCgroups
+ } else {
+ logrus.Warn("systemd cgroup flag passed, but systemd support for managing cgroups is not available.")
+ }
+ }
+ return libcontainer.New(context.GlobalString("root"), cgm)
}
func getContainer(context *cli.Context) (libcontainer.Container, error) {
diff --git a/vendor/src/github.com/docker/libcontainer/process.go b/vendor/src/github.com/docker/libcontainer/process.go
index 82fcff8c4c..7902d08ce4 100644
--- a/vendor/src/github.com/docker/libcontainer/process.go
+++ b/vendor/src/github.com/docker/libcontainer/process.go
@@ -23,7 +23,7 @@ type Process struct {
Env []string
// User will set the uid and gid of the executing process running inside the container
- // local to the contaienr's user and group configuration.
+ // local to the container's user and group configuration.
User string
// Cwd will change the processes current working directory inside the container's rootfs.
@@ -38,11 +38,14 @@ type Process struct {
// Stderr is a pointer to a writer which receives the standard error stream.
Stderr io.Writer
+ // ExtraFiles specifies additional open files to be inherited by the container
+ ExtraFiles []*os.File
+
// consolePath is the path to the console allocated to the container.
consolePath string
// Capabilities specify the capabilities to keep when executing the process inside the container
- // All capbilities not specified will be dropped from the processes capability mask
+ // All capabilities not specified will be dropped from the processes capability mask
Capabilities []string
ops processOperations
diff --git a/vendor/src/github.com/docker/libcontainer/process_linux.go b/vendor/src/github.com/docker/libcontainer/process_linux.go
index 1c74b65490..66411a8a9d 100644
--- a/vendor/src/github.com/docker/libcontainer/process_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/process_linux.go
@@ -119,6 +119,9 @@ func (p *setnsProcess) execSetns() error {
// terminate sends a SIGKILL to the forked process for the setns routine then waits to
// avoid the process becomming a zombie.
func (p *setnsProcess) terminate() error {
+ if p.cmd.Process == nil {
+ return nil
+ }
err := p.cmd.Process.Kill()
if _, werr := p.wait(); err == nil {
err = werr
diff --git a/vendor/src/github.com/docker/libcontainer/rootfs_linux.go b/vendor/src/github.com/docker/libcontainer/rootfs_linux.go
index ab1a9a5fcb..4ddfff1fe2 100644
--- a/vendor/src/github.com/docker/libcontainer/rootfs_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/rootfs_linux.go
@@ -6,11 +6,15 @@ import (
"fmt"
"io/ioutil"
"os"
+ "os/exec"
+ "path"
"path/filepath"
"strings"
"syscall"
"time"
+ "github.com/docker/docker/pkg/symlink"
+ "github.com/docker/libcontainer/cgroups"
"github.com/docker/libcontainer/configs"
"github.com/docker/libcontainer/label"
)
@@ -24,9 +28,20 @@ func setupRootfs(config *configs.Config, console *linuxConsole) (err error) {
return newSystemError(err)
}
for _, m := range config.Mounts {
+ for _, precmd := range m.PremountCmds {
+ if err := mountCmd(precmd); err != nil {
+ return newSystemError(err)
+ }
+ }
if err := mountToRootfs(m, config.Rootfs, config.MountLabel); err != nil {
return newSystemError(err)
}
+
+ for _, postcmd := range m.PostmountCmds {
+ if err := mountCmd(postcmd); err != nil {
+ return newSystemError(err)
+ }
+ }
}
if err := createDevices(config); err != nil {
return newSystemError(err)
@@ -34,11 +49,6 @@ func setupRootfs(config *configs.Config, console *linuxConsole) (err error) {
if err := setupPtmx(config, console); err != nil {
return newSystemError(err)
}
- // stdin, stdout and stderr could be pointing to /dev/null from parent namespace.
- // re-open them inside this namespace.
- if err := reOpenDevNull(config.Rootfs); err != nil {
- return newSystemError(err)
- }
if err := setupDevSymlinks(config.Rootfs); err != nil {
return newSystemError(err)
}
@@ -53,6 +63,9 @@ func setupRootfs(config *configs.Config, console *linuxConsole) (err error) {
if err != nil {
return newSystemError(err)
}
+ if err := reOpenDevNull(config.Rootfs); err != nil {
+ return newSystemError(err)
+ }
if config.Readonlyfs {
if err := setReadonly(); err != nil {
return newSystemError(err)
@@ -62,6 +75,18 @@ func setupRootfs(config *configs.Config, console *linuxConsole) (err error) {
return nil
}
+func mountCmd(cmd configs.Command) error {
+
+ command := exec.Command(cmd.Path, cmd.Args[:]...)
+ command.Env = cmd.Env
+ command.Dir = cmd.Dir
+ if out, err := command.CombinedOutput(); err != nil {
+ return fmt.Errorf("%#v failed: %s: %v", cmd, string(out), err)
+ }
+
+ return nil
+}
+
func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error {
var (
dest = m.Destination
@@ -72,11 +97,19 @@ func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error {
}
switch m.Device {
- case "proc", "mqueue", "sysfs":
+ case "proc", "sysfs":
if err := os.MkdirAll(dest, 0755); err != nil && !os.IsExist(err) {
return err
}
return syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags), "")
+ case "mqueue":
+ if err := os.MkdirAll(dest, 0755); err != nil && !os.IsExist(err) {
+ return err
+ }
+ if err := syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags), ""); err != nil {
+ return err
+ }
+ return label.SetFileLabel(dest, mountLabel)
case "tmpfs":
stat, err := os.Stat(dest)
if err != nil {
@@ -105,6 +138,16 @@ func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error {
// unable to bind anything to it.
return err
}
+ // ensure that the destination of the bind mount is resolved of symlinks at mount time because
+ // any previous mounts can invalidate the next mount's destination.
+ // this can happen when a user specifies mounts within other mounts to cause breakouts or other
+ // evil stuff to try to escape the container's rootfs.
+ if dest, err = symlink.FollowSymlinkInScope(filepath.Join(rootfs, m.Destination), rootfs); err != nil {
+ return err
+ }
+ if err := checkMountDestination(rootfs, dest); err != nil {
+ return err
+ }
if err := createIfNotExists(dest, stat.IsDir()); err != nil {
return err
}
@@ -126,12 +169,65 @@ func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error {
return err
}
}
+ case "cgroup":
+ mounts, err := cgroups.GetCgroupMounts()
+ if err != nil {
+ return err
+ }
+ var binds []*configs.Mount
+ for _, mm := range mounts {
+ dir, err := mm.GetThisCgroupDir()
+ if err != nil {
+ return err
+ }
+ binds = append(binds, &configs.Mount{
+ Device: "bind",
+ Source: filepath.Join(mm.Mountpoint, dir),
+ Destination: filepath.Join(m.Destination, strings.Join(mm.Subsystems, ",")),
+ Flags: syscall.MS_BIND | syscall.MS_REC | syscall.MS_RDONLY,
+ })
+ }
+ tmpfs := &configs.Mount{
+ Device: "tmpfs",
+ Destination: m.Destination,
+ Flags: syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV,
+ }
+ if err := mountToRootfs(tmpfs, rootfs, mountLabel); err != nil {
+ return err
+ }
+ for _, b := range binds {
+ if err := mountToRootfs(b, rootfs, mountLabel); err != nil {
+ return err
+ }
+ }
default:
return fmt.Errorf("unknown mount device %q to %q", m.Device, m.Destination)
}
return nil
}
+// checkMountDestination checks to ensure that the mount destination is not over the
+// top of /proc or /sys.
+// dest is required to be an abs path and have any symlinks resolved before calling this function.
+func checkMountDestination(rootfs, dest string) error {
+ if filepath.Clean(rootfs) == filepath.Clean(dest) {
+ return fmt.Errorf("mounting into / is prohibited")
+ }
+ invalidDestinations := []string{
+ "/proc",
+ }
+ for _, invalid := range invalidDestinations {
+ path, err := filepath.Rel(filepath.Join(rootfs, invalid), dest)
+ if err != nil {
+ return err
+ }
+ if path == "." || !strings.HasPrefix(path, "..") {
+ return fmt.Errorf("%q cannot be mounted because it is located inside %q", dest, invalid)
+ }
+ }
+ return nil
+}
+
func setupDevSymlinks(rootfs string) error {
var links = [][2]string{
{"/proc/self/fd", "/dev/fd"},
@@ -156,11 +252,13 @@ func setupDevSymlinks(rootfs string) error {
return nil
}
-// If stdin, stdout or stderr are pointing to '/dev/null' in the global mount namespace,
-// this method will make them point to '/dev/null' in this namespace.
+// If stdin, stdout, and/or stderr are pointing to `/dev/null` in the parent's rootfs
+// this method will make them point to `/dev/null` in this container's rootfs. This
+// needs to be called after we chroot/pivot into the container's rootfs so that any
+// symlinks are resolved locally.
func reOpenDevNull(rootfs string) error {
var stat, devNullStat syscall.Stat_t
- file, err := os.Open(filepath.Join(rootfs, "/dev/null"))
+ file, err := os.Open("/dev/null")
if err != nil {
return fmt.Errorf("Failed to open /dev/null - %s", err)
}
@@ -240,9 +338,9 @@ func mknodDevice(dest string, node *configs.Device) error {
}
func prepareRoot(config *configs.Config) error {
- flag := syscall.MS_PRIVATE | syscall.MS_REC
- if config.NoPivotRoot {
- flag = syscall.MS_SLAVE | syscall.MS_REC
+ flag := syscall.MS_SLAVE | syscall.MS_REC
+ if config.Privatefs {
+ flag = syscall.MS_PRIVATE | syscall.MS_REC
}
if err := syscall.Mount("", "/", "", uintptr(flag), ""); err != nil {
return err
@@ -355,3 +453,10 @@ func maskFile(path string) error {
}
return nil
}
+
+// writeSystemProperty writes the value to a path under /proc/sys as determined from the key.
+// For e.g. net.ipv4.ip_forward translated to /proc/sys/net/ipv4/ip_forward.
+func writeSystemProperty(key, value string) error {
+ keyPath := strings.Replace(key, ".", "/", -1)
+ return ioutil.WriteFile(path.Join("/proc/sys", keyPath), []byte(value), 0644)
+}
diff --git a/vendor/src/github.com/docker/libcontainer/rootfs_linux_test.go b/vendor/src/github.com/docker/libcontainer/rootfs_linux_test.go
new file mode 100644
index 0000000000..a3bb07708b
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/rootfs_linux_test.go
@@ -0,0 +1,37 @@
+// +build linux
+
+package libcontainer
+
+import "testing"
+
+func TestCheckMountDestOnProc(t *testing.T) {
+ dest := "/rootfs/proc/"
+ err := checkMountDestination("/rootfs", dest)
+ if err == nil {
+ t.Fatal("destination inside proc should return an error")
+ }
+}
+
+func TestCheckMountDestInSys(t *testing.T) {
+ dest := "/rootfs//sys/fs/cgroup"
+ err := checkMountDestination("/rootfs", dest)
+ if err != nil {
+ t.Fatal("destination inside /sys should not return an error")
+ }
+}
+
+func TestCheckMountDestFalsePositive(t *testing.T) {
+ dest := "/rootfs/sysfiles/fs/cgroup"
+ err := checkMountDestination("/rootfs", dest)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestCheckMountRoot(t *testing.T) {
+ dest := "/rootfs"
+ err := checkMountDestination("/rootfs", dest)
+ if err == nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/src/github.com/docker/libcontainer/standard_init_linux.go b/vendor/src/github.com/docker/libcontainer/standard_init_linux.go
index 282832b568..251c09f696 100644
--- a/vendor/src/github.com/docker/libcontainer/standard_init_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/standard_init_linux.go
@@ -64,6 +64,13 @@ func (l *linuxStandardInit) Init() error {
if err := label.SetProcessLabel(l.config.Config.ProcessLabel); err != nil {
return err
}
+
+ for key, value := range l.config.Config.SystemProperties {
+ if err := writeSystemProperty(key, value); err != nil {
+ return err
+ }
+ }
+
for _, path := range l.config.Config.ReadonlyPaths {
if err := remountReadonly(path); err != nil {
return err
diff --git a/vendor/src/github.com/docker/libcontainer/system/setns_linux.go b/vendor/src/github.com/docker/libcontainer/system/setns_linux.go
index 228e6ccd7f..a3c4cbb273 100644
--- a/vendor/src/github.com/docker/libcontainer/system/setns_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/system/setns_linux.go
@@ -12,8 +12,10 @@ import (
// We are declaring the macro here because the SETNS syscall does not exist in th stdlib
var setNsMap = map[string]uintptr{
"linux/386": 346,
+ "linux/arm64": 268,
"linux/amd64": 308,
- "linux/arm": 374,
+ "linux/arm": 375,
+ "linux/ppc": 350,
"linux/ppc64": 350,
"linux/ppc64le": 350,
"linux/s390x": 339,
diff --git a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_64.go b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_64.go
index 6840c3770f..0816bf8281 100644
--- a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_64.go
+++ b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_64.go
@@ -1,4 +1,4 @@
-// +build linux,amd64 linux,ppc64 linux,ppc64le linux,s390x
+// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x
package system
diff --git a/vendor/src/github.com/docker/libcontainer/update-vendor.sh b/vendor/src/github.com/docker/libcontainer/update-vendor.sh
index b68f5d4610..6d03d770a3 100755
--- a/vendor/src/github.com/docker/libcontainer/update-vendor.sh
+++ b/vendor/src/github.com/docker/libcontainer/update-vendor.sh
@@ -43,7 +43,7 @@ clone() {
clone git github.com/codegangsta/cli 1.1.0
clone git github.com/coreos/go-systemd v2
clone git github.com/godbus/dbus v2
-clone git github.com/Sirupsen/logrus v0.6.6
-clone git github.com/syndtr/gocapability 8e4cdcb
+clone git github.com/Sirupsen/logrus v0.7.3
+clone git github.com/syndtr/gocapability 66ef2aa
# intentionally not vendoring Docker itself... that'd be a circle :)
diff --git a/vendor/src/github.com/docker/libnetwork/.gitignore b/vendor/src/github.com/docker/libnetwork/.gitignore
new file mode 100644
index 0000000000..c03c9653ab
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/.gitignore
@@ -0,0 +1,33 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+# Coverage
+*.tmp
+*.coverprofile
+
+# IDE files
+.project
+
+libnetwork-build.created
diff --git a/vendor/src/github.com/docker/libnetwork/Godeps/Godeps.json b/vendor/src/github.com/docker/libnetwork/Godeps/Godeps.json
new file mode 100644
index 0000000000..c28930943b
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/Godeps/Godeps.json
@@ -0,0 +1,85 @@
+{
+ "ImportPath": "github.com/docker/libnetwork",
+ "GoVersion": "go1.4.1",
+ "Packages": [
+ "./..."
+ ],
+ "Deps": [
+ {
+ "ImportPath": "github.com/Sirupsen/logrus",
+ "Comment": "v0.6.4-12-g467d9d5",
+ "Rev": "467d9d55c2d2c17248441a8fc661561161f40d5e"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/homedir",
+ "Comment": "v1.4.1-3479-ga9172f5",
+ "Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/ioutils",
+ "Comment": "v1.4.1-3479-ga9172f5",
+ "Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/mflag",
+ "Comment": "v1.4.1-3479-ga9172f5",
+ "Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/parsers",
+ "Comment": "v1.4.1-3479-ga9172f5",
+ "Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/plugins",
+ "Comment": "v1.4.1-3479-ga9172f5",
+ "Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/proxy",
+ "Comment": "v1.4.1-3479-ga9172f5",
+ "Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/reexec",
+ "Comment": "v1.4.1-3479-ga9172f5",
+ "Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/stringid",
+ "Comment": "v1.4.1-3479-ga9172f5",
+ "Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+ },
+ {
+ "ImportPath": "github.com/docker/docker/pkg/term",
+ "Comment": "v1.4.1-3479-ga9172f5",
+ "Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+ },
+ {
+ "ImportPath": "github.com/docker/libcontainer/user",
+ "Comment": "v1.4.0-495-g3e66118",
+ "Rev": "3e661186ba24f259d3860f067df052c7f6904bee"
+ },
+ {
+ "ImportPath": "github.com/godbus/dbus",
+ "Comment": "v2-3-g4160802",
+ "Rev": "41608027bdce7bfa8959d653a00b954591220e67"
+ },
+ {
+ "ImportPath": "github.com/gorilla/context",
+ "Rev": "215affda49addc4c8ef7e2534915df2c8c35c6cd"
+ },
+ {
+ "ImportPath": "github.com/gorilla/mux",
+ "Rev": "8096f47503459bcc74d1f4c487b7e6e42e5746b5"
+ },
+ {
+ "ImportPath": "github.com/vishvananda/netlink",
+ "Rev": "8eb64238879fed52fd51c5b30ad20b928fb4c36c"
+ },
+ {
+ "ImportPath": "github.com/vishvananda/netns",
+ "Rev": "008d17ae001344769b031375bdb38a86219154c6"
+ }
+ ]
+}
diff --git a/vendor/src/github.com/docker/libnetwork/Godeps/Readme b/vendor/src/github.com/docker/libnetwork/Godeps/Readme
new file mode 100644
index 0000000000..4cdaa53d56
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/Godeps/Readme
@@ -0,0 +1,5 @@
+This directory tree is generated automatically by godep.
+
+Please do not edit.
+
+See https://github.com/tools/godep for more information.
diff --git a/vendor/src/github.com/docker/libnetwork/LICENSE b/vendor/src/github.com/docker/libnetwork/LICENSE
new file mode 100644
index 0000000000..e06d208186
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/src/github.com/docker/libnetwork/MAINTAINERS b/vendor/src/github.com/docker/libnetwork/MAINTAINERS
new file mode 100644
index 0000000000..398fd6de37
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/MAINTAINERS
@@ -0,0 +1,4 @@
+Alexandr Morozov (@LK4D4)
+Arnaud Porterie (@icecrime)
+Madhu Venugopal (@mavenugo)
+Jana Radhakrishnan (@mrjana)
diff --git a/vendor/src/github.com/docker/libnetwork/Makefile b/vendor/src/github.com/docker/libnetwork/Makefile
new file mode 100644
index 0000000000..59c181ecdc
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/Makefile
@@ -0,0 +1,78 @@
+.PHONY: all all-local build build-local check check-code check-format run-tests check-local install-deps coveralls circle-ci
+SHELL=/bin/bash
+build_image=libnetwork-build
+dockerargs = --privileged -v $(shell pwd):/go/src/github.com/docker/libnetwork -w /go/src/github.com/docker/libnetwork
+container_env = -e "INSIDECONTAINER=-incontainer=true"
+docker = docker run --rm ${dockerargs} ${container_env} ${build_image}
+ciargs = -e "COVERALLS_TOKEN=$$COVERALLS_TOKEN" -e "INSIDECONTAINER=-incontainer=true"
+cidocker = docker run ${ciargs} ${dockerargs} golang:1.4
+
+all: ${build_image}.created
+ ${docker} make all-local
+
+all-local: check-local build-local
+
+${build_image}.created:
+ docker run --name=libnetworkbuild -v $(shell pwd):/go/src/github.com/docker/libnetwork -w /go/src/github.com/docker/libnetwork golang:1.4 make install-deps
+ docker commit libnetworkbuild ${build_image}
+ docker rm libnetworkbuild
+ touch ${build_image}.created
+
+build: ${build_image}.created
+ ${docker} make build-local
+
+build-local:
+ $(shell which godep) go build -tags experimental ./...
+
+check: ${build_image}.created
+ ${docker} make check-local
+
+check-code:
+ @echo "Checking code... "
+ test -z "$$(golint ./... | tee /dev/stderr)"
+ go vet ./...
+ @echo "Done checking code"
+
+check-format:
+ @echo "Checking format... "
+ test -z "$$(goimports -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"
+ @echo "Done checking format"
+
+run-tests:
+ @echo "Running tests... "
+ @echo "mode: count" > coverage.coverprofile
+ @for dir in $$(find . -maxdepth 10 -not -path './.git*' -not -path '*/_*' -type d); do \
+ if ls $$dir/*.go &> /dev/null; then \
+ pushd . &> /dev/null ; \
+ cd $$dir ; \
+ $(shell which godep) go test ${INSIDECONTAINER} -test.parallel 3 -test.v -covermode=count -coverprofile=./profile.tmp ; \
+ ret=$$? ;\
+ if [ $$ret -ne 0 ]; then exit $$ret; fi ;\
+ popd &> /dev/null; \
+ if [ -f $$dir/profile.tmp ]; then \
+ cat $$dir/profile.tmp | tail -n +2 >> coverage.coverprofile ; \
+ rm $$dir/profile.tmp ; \
+ fi ; \
+ fi ; \
+ done
+ @echo "Done running tests"
+
+check-local: check-format check-code run-tests
+
+install-deps:
+ apt-get update && apt-get -y install iptables
+ go get github.com/tools/godep
+ go get github.com/golang/lint/golint
+ go get golang.org/x/tools/cmd/vet
+ go get golang.org/x/tools/cmd/goimports
+ go get golang.org/x/tools/cmd/cover
+ go get github.com/mattn/goveralls
+
+coveralls:
+ -@goveralls -service circleci -coverprofile=coverage.coverprofile -repotoken $$COVERALLS_TOKEN
+
+# CircleCI's Docker fails when cleaning up using the --rm flag
+# The following target is a workaround for this
+
+circle-ci:
+ @${cidocker} make install-deps check-local coveralls
diff --git a/vendor/src/github.com/docker/libnetwork/README.md b/vendor/src/github.com/docker/libnetwork/README.md
new file mode 100644
index 0000000000..e51eba1569
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/README.md
@@ -0,0 +1,86 @@
+# libnetwork - networking for containers
+
+[](https://circleci.com/gh/docker/libnetwork/tree/master) [](https://coveralls.io/r/docker/libnetwork) [](https://godoc.org/github.com/docker/libnetwork)
+
+Libnetwork provides a native Go implementation for connecting containers
+
+The goal of libnetwork is to deliver a robust Container Network Model that provides a consistent programming interface and the required network abstractions for applications.
+
+**NOTE**: libnetwork project is under heavy development and is not ready for general use.
+
+#### Design
+Please refer to the [design](docs/design.md) for more information.
+
+#### Using libnetwork
+
+There are many networking solutions available to suit a broad range of use-cases. libnetwork uses a driver / plugin model to support all of these solutions while abstracting the complexity of the driver implementations by exposing a simple and consistent Network Model to users.
+
+
+```go
+ // Create a new controller instance
+ controller := libnetwork.New()
+
+ // Select and configure the network driver
+ networkType := "bridge"
+
+ driverOptions := options.Generic{}
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = driverOptions
+ err := controller.ConfigureNetworkDriver(networkType, genericOption)
+ if err != nil {
+ return
+ }
+
+ // Create a network for containers to join.
+	// NewNetwork accepts variadic optional arguments that libnetwork and Drivers can make use of
+ network, err := controller.NewNetwork(networkType, "network1")
+ if err != nil {
+ return
+ }
+
+ // For each new container: allocate IP and interfaces. The returned network
+ // settings will be used for container infos (inspect and such), as well as
+ // iptables rules for port publishing. This info is contained or accessible
+ // from the returned endpoint.
+ ep, err := network.CreateEndpoint("Endpoint1")
+ if err != nil {
+ return
+ }
+
+ // A container can join the endpoint by providing the container ID to the join
+ // api which returns the sandbox key which can be used to access the sandbox
+ // created for the container during join.
+	// Join accepts variadic arguments which will be made use of by libnetwork and Drivers
+ _, err = ep.Join("container1",
+ libnetwork.JoinOptionHostname("test"),
+ libnetwork.JoinOptionDomainname("docker.io"))
+ if err != nil {
+ return
+ }
+
+	// libnetwork client can check the endpoint's operational data via the Info() API
+ epInfo, err := ep.DriverInfo()
+ mapData, ok := epInfo[netlabel.PortMap]
+ if ok {
+ portMapping, ok := mapData.([]netutils.PortBinding)
+ if ok {
+ fmt.Printf("Current port mapping for endpoint %s: %v", ep.Name(), portMapping)
+ }
+ }
+
+```
+#### Current Status
+Please watch this space for updates on the progress.
+
+Currently libnetwork is nothing more than an attempt to modularize the Docker platform's networking subsystem by moving it into libnetwork as a library.
+
+## Future
+Please refer to [roadmap](ROADMAP.md) for more information.
+
+## Contributing
+
+Want to hack on libnetwork? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply.
+
+## Copyright and license
+Code and documentation copyright 2015 Docker, Inc. Code released under the Apache 2.0 license. Docs released under Creative Commons.
+
diff --git a/vendor/src/github.com/docker/libnetwork/ROADMAP.md b/vendor/src/github.com/docker/libnetwork/ROADMAP.md
new file mode 100644
index 0000000000..af8964559b
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/ROADMAP.md
@@ -0,0 +1,29 @@
+# Roadmap
+
+Libnetwork is a young project and is still being defined.
+This document defines the high-level goals of the project and defines the release-relationship to the Docker Platform.
+
+* [Goals](#goals)
+* [Project Planning](#project-planning): release-relationship to the Docker Platform.
+
+## Long-term Goal
+
+libnetwork project will follow Docker and Linux philosophy of delivering small, highly modular and composable tools that work well independently.
+libnetwork aims to satisfy that composable need for Networking in Containers.
+
+## Short-term Goals
+
+- Modularize the networking logic in Docker Engine and libcontainer into a single, reusable library
+- Replace the networking subsystem of Docker Engine, with libnetwork
+- Define a flexible model that allows local and remote drivers to provide networking to containers
+- Provide a stand-alone tool "dnet" for managing and testing libnetwork
+
+## Project Planning
+
+Libnetwork versions do not map 1:1 with Docker Platform releases.
+Milestones and Project Pages are used to define the set of features that are included in each release.
+
+| Platform Version | Libnetwork Version | Planning |
+|------------------|--------------------|----------|
+| Docker 1.7 | [0.3](https://github.com/docker/libnetwork/milestones/0.3) | [Project Page](https://github.com/docker/libnetwork/wiki/Docker-1.7-Project-Page) |
+| Docker 1.8 | [1.0](https://github.com/docker/libnetwork/milestones/1.0) | [Project Page](https://github.com/docker/libnetwork/wiki/Docker-1.8-Project-Page) |
diff --git a/vendor/src/github.com/docker/libnetwork/api/api.go b/vendor/src/github.com/docker/libnetwork/api/api.go
new file mode 100644
index 0000000000..97305d34fc
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/api/api.go
@@ -0,0 +1,541 @@
+package api
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/docker/libnetwork"
+ "github.com/docker/libnetwork/types"
+ "github.com/gorilla/mux"
+)
+
+var (
+ successResponse = responseStatus{Status: "Success", StatusCode: http.StatusOK}
+ createdResponse = responseStatus{Status: "Created", StatusCode: http.StatusCreated}
+ mismatchResponse = responseStatus{Status: "Body/URI parameter mismatch", StatusCode: http.StatusBadRequest}
+ badQueryresponse = responseStatus{Status: "Unsupported query", StatusCode: http.StatusBadRequest}
+)
+
+const (
+ // Resource name regex
+ regex = "[a-zA-Z_0-9-]+"
+ // Router URL variable definition
+ nwName = "{" + urlNwName + ":" + regex + "}"
+ nwID = "{" + urlNwID + ":" + regex + "}"
+ nwPID = "{" + urlNwPID + ":" + regex + "}"
+ epName = "{" + urlEpName + ":" + regex + "}"
+ epID = "{" + urlEpID + ":" + regex + "}"
+ epPID = "{" + urlEpPID + ":" + regex + "}"
+ cnID = "{" + urlCnID + ":" + regex + "}"
+
+ // Internal URL variable name, they can be anything
+ urlNwName = "network-name"
+ urlNwID = "network-id"
+ urlNwPID = "network-partial-id"
+ urlEpName = "endpoint-name"
+ urlEpID = "endpoint-id"
+ urlEpPID = "endpoint-partial-id"
+ urlCnID = "container-id"
+)
+
+// NewHTTPHandler creates and initialize the HTTP handler to serve the requests for libnetwork
+func NewHTTPHandler(c libnetwork.NetworkController) func(w http.ResponseWriter, req *http.Request) {
+ h := &httpHandler{c: c}
+ h.initRouter()
+ return h.handleRequest
+}
+
+type responseStatus struct {
+ Status string
+ StatusCode int
+}
+
+func (r *responseStatus) isOK() bool {
+ return r.StatusCode == http.StatusOK || r.StatusCode == http.StatusCreated
+}
+
+type processor func(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus)
+
+type httpHandler struct {
+ c libnetwork.NetworkController
+ r *mux.Router
+}
+
+func (h *httpHandler) handleRequest(w http.ResponseWriter, req *http.Request) {
+ // Make sure the service is there
+ if h.c == nil {
+ http.Error(w, "NetworkController is not available", http.StatusServiceUnavailable)
+ return
+ }
+
+ // Get handler from router and execute it
+ h.r.ServeHTTP(w, req)
+}
+
+func (h *httpHandler) initRouter() {
+ m := map[string][]struct {
+ url string
+ qrs []string
+ fct processor
+ }{
+ "GET": {
+ // Order matters
+ {"/networks", []string{"name", nwName}, procGetNetworks},
+ {"/networks", []string{"partial-id", nwPID}, procGetNetworks},
+ {"/networks", nil, procGetNetworks},
+ {"/networks/" + nwID, nil, procGetNetwork},
+ {"/networks/" + nwID + "/endpoints", []string{"name", epName}, procGetEndpoints},
+ {"/networks/" + nwID + "/endpoints", []string{"partial-id", epPID}, procGetEndpoints},
+ {"/networks/" + nwID + "/endpoints", nil, procGetEndpoints},
+ {"/networks/" + nwID + "/endpoints/" + epID, nil, procGetEndpoint},
+ },
+ "POST": {
+ {"/networks", nil, procCreateNetwork},
+ {"/networks/" + nwID + "/endpoints", nil, procCreateEndpoint},
+ {"/networks/" + nwID + "/endpoints/" + epID + "/containers", nil, procJoinEndpoint},
+ },
+ "DELETE": {
+ {"/networks/" + nwID, nil, procDeleteNetwork},
+ {"/networks/" + nwID + "/endpoints/" + epID, nil, procDeleteEndpoint},
+ {"/networks/id/" + nwID + "/endpoints/" + epID + "/containers/" + cnID, nil, procLeaveEndpoint},
+ },
+ }
+
+ h.r = mux.NewRouter()
+ for method, routes := range m {
+ for _, route := range routes {
+ r := h.r.Path("/{.*}" + route.url).Methods(method).HandlerFunc(makeHandler(h.c, route.fct))
+ if route.qrs != nil {
+ r.Queries(route.qrs...)
+ }
+ }
+ }
+}
+
+func makeHandler(ctrl libnetwork.NetworkController, fct processor) http.HandlerFunc {
+ return func(w http.ResponseWriter, req *http.Request) {
+ var (
+ body []byte
+ err error
+ )
+ if req.Body != nil {
+ body, err = ioutil.ReadAll(req.Body)
+ if err != nil {
+ http.Error(w, "Invalid body: "+err.Error(), http.StatusBadRequest)
+ return
+ }
+ }
+
+ res, rsp := fct(ctrl, mux.Vars(req), body)
+ if !rsp.isOK() {
+ http.Error(w, rsp.Status, rsp.StatusCode)
+ return
+ }
+ if res != nil {
+ writeJSON(w, rsp.StatusCode, res)
+ }
+ }
+}
+
+/*****************
+ Resource Builders
+******************/
+
+func buildNetworkResource(nw libnetwork.Network) *networkResource {
+ r := &networkResource{}
+ if nw != nil {
+ r.Name = nw.Name()
+ r.ID = nw.ID()
+ r.Type = nw.Type()
+ epl := nw.Endpoints()
+ r.Endpoints = make([]*endpointResource, 0, len(epl))
+ for _, e := range epl {
+ epr := buildEndpointResource(e)
+ r.Endpoints = append(r.Endpoints, epr)
+ }
+ }
+ return r
+}
+
+func buildEndpointResource(ep libnetwork.Endpoint) *endpointResource {
+ r := &endpointResource{}
+ if ep != nil {
+ r.Name = ep.Name()
+ r.ID = ep.ID()
+ r.Network = ep.Network()
+ }
+ return r
+}
+
+/**************
+ Options Parser
+***************/
+
+func (ej *endpointJoin) parseOptions() []libnetwork.EndpointOption {
+ var setFctList []libnetwork.EndpointOption
+ if ej.HostName != "" {
+ setFctList = append(setFctList, libnetwork.JoinOptionHostname(ej.HostName))
+ }
+ if ej.DomainName != "" {
+ setFctList = append(setFctList, libnetwork.JoinOptionDomainname(ej.DomainName))
+ }
+ if ej.HostsPath != "" {
+ setFctList = append(setFctList, libnetwork.JoinOptionHostsPath(ej.HostsPath))
+ }
+ if ej.ResolvConfPath != "" {
+ setFctList = append(setFctList, libnetwork.JoinOptionResolvConfPath(ej.ResolvConfPath))
+ }
+ if ej.UseDefaultSandbox {
+ setFctList = append(setFctList, libnetwork.JoinOptionUseDefaultSandbox())
+ }
+ if ej.DNS != nil {
+ for _, d := range ej.DNS {
+ setFctList = append(setFctList, libnetwork.JoinOptionDNS(d))
+ }
+ }
+ if ej.ExtraHosts != nil {
+ for _, e := range ej.ExtraHosts {
+ setFctList = append(setFctList, libnetwork.JoinOptionExtraHost(e.Name, e.Address))
+ }
+ }
+ if ej.ParentUpdates != nil {
+ for _, p := range ej.ParentUpdates {
+ setFctList = append(setFctList, libnetwork.JoinOptionParentUpdate(p.EndpointID, p.Name, p.Address))
+ }
+ }
+ return setFctList
+}
+
+/******************
+ Process functions
+*******************/
+
+/***************************
+ NetworkController interface
+****************************/
+func procCreateNetwork(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+ var create networkCreate
+
+ err := json.Unmarshal(body, &create)
+ if err != nil {
+ return "", &responseStatus{Status: "Invalid body: " + err.Error(), StatusCode: http.StatusBadRequest}
+ }
+
+ nw, err := c.NewNetwork(create.NetworkType, create.Name, nil)
+ if err != nil {
+ return "", convertNetworkError(err)
+ }
+
+ return nw.ID(), &createdResponse
+}
+
+func procGetNetwork(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+ t, by := detectNetworkTarget(vars)
+ nw, errRsp := findNetwork(c, t, by)
+ if !errRsp.isOK() {
+ return nil, errRsp
+ }
+ return buildNetworkResource(nw), &successResponse
+}
+
+func procGetNetworks(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+ var list []*networkResource
+
+ // Look for query filters and validate
+ name, queryByName := vars[urlNwName]
+ shortID, queryByPid := vars[urlNwPID]
+ if queryByName && queryByPid {
+ return nil, &badQueryresponse
+ }
+
+ if queryByName {
+ if nw, errRsp := findNetwork(c, name, byName); errRsp.isOK() {
+ list = append(list, buildNetworkResource(nw))
+ }
+ } else if queryByPid {
+ // Return all the prefix-matching networks
+ l := func(nw libnetwork.Network) bool {
+ if strings.HasPrefix(nw.ID(), shortID) {
+ list = append(list, buildNetworkResource(nw))
+ }
+ return false
+ }
+ c.WalkNetworks(l)
+ } else {
+ for _, nw := range c.Networks() {
+ list = append(list, buildNetworkResource(nw))
+ }
+ }
+
+ return list, &successResponse
+}
+
+/******************
+ Network interface
+*******************/
+func procCreateEndpoint(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+ var ec endpointCreate
+
+ err := json.Unmarshal(body, &ec)
+ if err != nil {
+ return "", &responseStatus{Status: "Invalid body: " + err.Error(), StatusCode: http.StatusBadRequest}
+ }
+
+ nwT, nwBy := detectNetworkTarget(vars)
+ n, errRsp := findNetwork(c, nwT, nwBy)
+ if !errRsp.isOK() {
+ return "", errRsp
+ }
+
+ var setFctList []libnetwork.EndpointOption
+ if ec.ExposedPorts != nil {
+ setFctList = append(setFctList, libnetwork.CreateOptionExposedPorts(ec.ExposedPorts))
+ }
+ if ec.PortMapping != nil {
+ setFctList = append(setFctList, libnetwork.CreateOptionPortMapping(ec.PortMapping))
+ }
+
+ ep, err := n.CreateEndpoint(ec.Name, setFctList...)
+ if err != nil {
+ return "", convertNetworkError(err)
+ }
+
+ return ep.ID(), &createdResponse
+}
+
+func procGetEndpoint(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+ nwT, nwBy := detectNetworkTarget(vars)
+ epT, epBy := detectEndpointTarget(vars)
+
+ ep, errRsp := findEndpoint(c, nwT, epT, nwBy, epBy)
+ if !errRsp.isOK() {
+ return nil, errRsp
+ }
+
+ return buildEndpointResource(ep), &successResponse
+}
+
+func procGetEndpoints(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+ // Look for query filters and validate
+ name, queryByName := vars[urlEpName]
+ shortID, queryByPid := vars[urlEpPID]
+ if queryByName && queryByPid {
+ return nil, &badQueryresponse
+ }
+
+ nwT, nwBy := detectNetworkTarget(vars)
+ nw, errRsp := findNetwork(c, nwT, nwBy)
+ if !errRsp.isOK() {
+ return nil, errRsp
+ }
+
+ var list []*endpointResource
+
+ // If query parameter is specified, return a filtered collection
+ if queryByName {
+ if ep, errRsp := findEndpoint(c, nwT, name, nwBy, byName); errRsp.isOK() {
+ list = append(list, buildEndpointResource(ep))
+ }
+ } else if queryByPid {
+		// Return all the prefix-matching endpoints
+ l := func(ep libnetwork.Endpoint) bool {
+ if strings.HasPrefix(ep.ID(), shortID) {
+ list = append(list, buildEndpointResource(ep))
+ }
+ return false
+ }
+ nw.WalkEndpoints(l)
+ } else {
+ for _, ep := range nw.Endpoints() {
+ epr := buildEndpointResource(ep)
+ list = append(list, epr)
+ }
+ }
+
+ return list, &successResponse
+}
+
+func procDeleteNetwork(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+ target, by := detectNetworkTarget(vars)
+
+ nw, errRsp := findNetwork(c, target, by)
+ if !errRsp.isOK() {
+ return nil, errRsp
+ }
+
+ err := nw.Delete()
+ if err != nil {
+ return nil, convertNetworkError(err)
+ }
+
+ return nil, &successResponse
+}
+
+/******************
+ Endpoint interface
+*******************/
+func procJoinEndpoint(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+ var ej endpointJoin
+ err := json.Unmarshal(body, &ej)
+ if err != nil {
+ return nil, &responseStatus{Status: "Invalid body: " + err.Error(), StatusCode: http.StatusBadRequest}
+ }
+
+ nwT, nwBy := detectNetworkTarget(vars)
+ epT, epBy := detectEndpointTarget(vars)
+
+ ep, errRsp := findEndpoint(c, nwT, epT, nwBy, epBy)
+ if !errRsp.isOK() {
+ return nil, errRsp
+ }
+
+ cd, err := ep.Join(ej.ContainerID, ej.parseOptions()...)
+ if err != nil {
+ return nil, convertNetworkError(err)
+ }
+ return cd, &successResponse
+}
+
+func procLeaveEndpoint(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+ nwT, nwBy := detectNetworkTarget(vars)
+ epT, epBy := detectEndpointTarget(vars)
+
+ ep, errRsp := findEndpoint(c, nwT, epT, nwBy, epBy)
+ if !errRsp.isOK() {
+ return nil, errRsp
+ }
+
+ err := ep.Leave(vars[urlCnID])
+ if err != nil {
+ return nil, convertNetworkError(err)
+ }
+
+ return nil, &successResponse
+}
+
+func procDeleteEndpoint(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+ nwT, nwBy := detectNetworkTarget(vars)
+ epT, epBy := detectEndpointTarget(vars)
+
+ ep, errRsp := findEndpoint(c, nwT, epT, nwBy, epBy)
+ if !errRsp.isOK() {
+ return nil, errRsp
+ }
+
+ err := ep.Delete()
+ if err != nil {
+ return nil, convertNetworkError(err)
+ }
+
+ return nil, &successResponse
+}
+
+/***********
+ Utilities
+************/
+const (
+ byID = iota
+ byName
+)
+
+func detectNetworkTarget(vars map[string]string) (string, int) {
+ if target, ok := vars[urlNwName]; ok {
+ return target, byName
+ }
+ if target, ok := vars[urlNwID]; ok {
+ return target, byID
+ }
+	// vars are populated from the URL, so the following cannot happen
+ panic("Missing URL variable parameter for network")
+}
+
+func detectEndpointTarget(vars map[string]string) (string, int) {
+ if target, ok := vars[urlEpName]; ok {
+ return target, byName
+ }
+ if target, ok := vars[urlEpID]; ok {
+ return target, byID
+ }
+	// vars are populated from the URL, so the following cannot happen
+ panic("Missing URL variable parameter for endpoint")
+}
+
+func findNetwork(c libnetwork.NetworkController, s string, by int) (libnetwork.Network, *responseStatus) {
+ var (
+ nw libnetwork.Network
+ err error
+ )
+ switch by {
+ case byID:
+ nw, err = c.NetworkByID(s)
+ case byName:
+ nw, err = c.NetworkByName(s)
+ default:
+ panic(fmt.Sprintf("unexpected selector for network search: %d", by))
+ }
+ if err != nil {
+ if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok {
+ return nil, &responseStatus{Status: "Resource not found: Network", StatusCode: http.StatusNotFound}
+ }
+ return nil, &responseStatus{Status: err.Error(), StatusCode: http.StatusBadRequest}
+ }
+ return nw, &successResponse
+}
+
+func findEndpoint(c libnetwork.NetworkController, ns, es string, nwBy, epBy int) (libnetwork.Endpoint, *responseStatus) {
+ nw, errRsp := findNetwork(c, ns, nwBy)
+ if !errRsp.isOK() {
+ return nil, errRsp
+ }
+ var (
+ err error
+ ep libnetwork.Endpoint
+ )
+ switch epBy {
+ case byID:
+ ep, err = nw.EndpointByID(es)
+ case byName:
+ ep, err = nw.EndpointByName(es)
+ default:
+ panic(fmt.Sprintf("unexpected selector for endpoint search: %d", epBy))
+ }
+ if err != nil {
+ if _, ok := err.(libnetwork.ErrNoSuchEndpoint); ok {
+ return nil, &responseStatus{Status: "Resource not found: Endpoint", StatusCode: http.StatusNotFound}
+ }
+ return nil, &responseStatus{Status: err.Error(), StatusCode: http.StatusBadRequest}
+ }
+ return ep, &successResponse
+}
+
+func convertNetworkError(err error) *responseStatus {
+ var code int
+ switch err.(type) {
+ case types.BadRequestError:
+ code = http.StatusBadRequest
+ case types.ForbiddenError:
+ code = http.StatusForbidden
+ case types.NotFoundError:
+ code = http.StatusNotFound
+ case types.TimeoutError:
+ code = http.StatusRequestTimeout
+ case types.NotImplementedError:
+ code = http.StatusNotImplemented
+ case types.NoServiceError:
+ code = http.StatusServiceUnavailable
+ case types.InternalError:
+ code = http.StatusInternalServerError
+ default:
+ code = http.StatusInternalServerError
+ }
+ return &responseStatus{Status: err.Error(), StatusCode: code}
+}
+
+func writeJSON(w http.ResponseWriter, code int, v interface{}) error {
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(code)
+ return json.NewEncoder(w).Encode(v)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/api/api_test.go b/vendor/src/github.com/docker/libnetwork/api/api_test.go
new file mode 100644
index 0000000000..7fbbee504a
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/api/api_test.go
@@ -0,0 +1,1566 @@
+package api
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "runtime"
+ "testing"
+
+ "github.com/docker/docker/pkg/reexec"
+ "github.com/docker/libnetwork"
+ "github.com/docker/libnetwork/netlabel"
+ "github.com/docker/libnetwork/netutils"
+ "github.com/docker/libnetwork/options"
+ "github.com/docker/libnetwork/types"
+)
+
+const (
+ bridgeNetType = "bridge"
+ bridgeName = "docker0"
+)
+
+func getEmptyGenericOption() map[string]interface{} {
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = options.Generic{}
+ return genericOption
+}
+
+func i2s(i interface{}) string {
+ s, ok := i.(string)
+ if !ok {
+ panic(fmt.Sprintf("Failed i2s for %v", i))
+ }
+ return s
+}
+
+func i2e(i interface{}) *endpointResource {
+ s, ok := i.(*endpointResource)
+ if !ok {
+ panic(fmt.Sprintf("Failed i2e for %v", i))
+ }
+ return s
+}
+
+func i2c(i interface{}) *libnetwork.ContainerData {
+ s, ok := i.(*libnetwork.ContainerData)
+ if !ok {
+ panic(fmt.Sprintf("Failed i2c for %v", i))
+ }
+ return s
+}
+
+func i2eL(i interface{}) []*endpointResource {
+ s, ok := i.([]*endpointResource)
+ if !ok {
+ panic(fmt.Sprintf("Failed i2eL for %v", i))
+ }
+ return s
+}
+
+func i2n(i interface{}) *networkResource {
+ s, ok := i.(*networkResource)
+ if !ok {
+ panic(fmt.Sprintf("Failed i2n for %v", i))
+ }
+ return s
+}
+
+func i2nL(i interface{}) []*networkResource {
+ s, ok := i.([]*networkResource)
+ if !ok {
+ panic(fmt.Sprintf("Failed i2nL for %v", i))
+ }
+ return s
+}
+
+func TestMain(m *testing.M) {
+ if reexec.Init() {
+ return
+ }
+ os.Exit(m.Run())
+}
+
+func TestJoinOptionParser(t *testing.T) {
+ hn := "host1"
+ dn := "docker.com"
+ hp := "/etc/hosts"
+ rc := "/etc/resolv.conf"
+ dnss := []string{"8.8.8.8", "172.28.34.5"}
+ ehs := []endpointExtraHost{endpointExtraHost{Name: "extra1", Address: "172.28.9.1"}, endpointExtraHost{Name: "extra2", Address: "172.28.9.2"}}
+ pus := []endpointParentUpdate{endpointParentUpdate{EndpointID: "abc123def456", Name: "serv1", Address: "172.28.30.123"}}
+
+ ej := endpointJoin{
+ HostName: hn,
+ DomainName: dn,
+ HostsPath: hp,
+ ResolvConfPath: rc,
+ DNS: dnss,
+ ExtraHosts: ehs,
+ ParentUpdates: pus,
+ UseDefaultSandbox: true,
+ }
+
+ if len(ej.parseOptions()) != 10 {
+ t.Fatalf("Failed to generate all libnetwork.EndpointJoinOption methods libnetwork.EndpointJoinOption method")
+ }
+
+}
+
+func TestJson(t *testing.T) {
+ nc := networkCreate{NetworkType: bridgeNetType}
+ b, err := json.Marshal(nc)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var ncp networkCreate
+ err = json.Unmarshal(b, &ncp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if nc.NetworkType != ncp.NetworkType {
+ t.Fatalf("Incorrect networkCreate after json encoding/deconding: %v", ncp)
+ }
+
+ jl := endpointJoin{ContainerID: "abcdef456789"}
+ b, err = json.Marshal(jl)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var jld endpointJoin
+ err = json.Unmarshal(b, &jld)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if jl.ContainerID != jld.ContainerID {
+ t.Fatalf("Incorrect endpointJoin after json encoding/deconding: %v", jld)
+ }
+}
+
+func TestCreateDeleteNetwork(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ c, err := libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = c.ConfigureNetworkDriver(bridgeNetType, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ badBody, err := json.Marshal("bad body")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ vars := make(map[string]string)
+ _, errRsp := procCreateNetwork(c, nil, badBody)
+ if errRsp == &createdResponse {
+ t.Fatalf("Expected to fail but succeeded")
+ }
+ if errRsp.StatusCode != http.StatusBadRequest {
+ t.Fatalf("Expected StatusBadRequest status code, got: %v", errRsp)
+ }
+
+ incompleteBody, err := json.Marshal(networkCreate{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, errRsp = procCreateNetwork(c, vars, incompleteBody)
+ if errRsp == &createdResponse {
+ t.Fatalf("Expected to fail but succeeded")
+ }
+ if errRsp.StatusCode != http.StatusBadRequest {
+ t.Fatalf("Expected StatusBadRequest status code, got: %v", errRsp)
+ }
+
+ ops := make(map[string]interface{})
+ ops[netlabel.GenericData] = options.Generic{}
+ nc := networkCreate{Name: "network_1", NetworkType: bridgeNetType, Options: ops}
+ goodBody, err := json.Marshal(nc)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, errRsp = procCreateNetwork(c, vars, goodBody)
+ if errRsp != &createdResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+
+ vars[urlNwName] = ""
+ _, errRsp = procDeleteNetwork(c, vars, nil)
+ if errRsp == &successResponse {
+ t.Fatalf("Expected to fail but succeeded")
+ }
+
+ vars[urlNwName] = "abc"
+ _, errRsp = procDeleteNetwork(c, vars, nil)
+ if errRsp == &successResponse {
+ t.Fatalf("Expected to fail but succeeded")
+ }
+
+ vars[urlNwName] = "network_1"
+ _, errRsp = procDeleteNetwork(c, vars, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+}
+
+func TestGetNetworksAndEndpoints(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ c, err := libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = c.ConfigureNetworkDriver(bridgeNetType, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ nc := networkCreate{Name: "sh", NetworkType: bridgeNetType}
+ body, err := json.Marshal(nc)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ vars := make(map[string]string)
+ inid, errRsp := procCreateNetwork(c, vars, body)
+ if errRsp != &createdResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ nid, ok := inid.(string)
+ if !ok {
+ t.FailNow()
+ }
+
+ ec1 := endpointCreate{
+ Name: "ep1",
+ ExposedPorts: []types.TransportPort{
+ types.TransportPort{Proto: types.TCP, Port: uint16(5000)},
+ types.TransportPort{Proto: types.UDP, Port: uint16(400)},
+ types.TransportPort{Proto: types.TCP, Port: uint16(600)},
+ },
+ PortMapping: []types.PortBinding{
+ types.PortBinding{Proto: types.TCP, Port: uint16(230), HostPort: uint16(23000)},
+ types.PortBinding{Proto: types.UDP, Port: uint16(200), HostPort: uint16(22000)},
+ types.PortBinding{Proto: types.TCP, Port: uint16(120), HostPort: uint16(12000)},
+ },
+ }
+ b1, err := json.Marshal(ec1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ec2 := endpointCreate{Name: "ep2"}
+ b2, err := json.Marshal(ec2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ vars[urlNwName] = "sh"
+ vars[urlEpName] = "ep1"
+ ieid1, errRsp := procCreateEndpoint(c, vars, b1)
+ if errRsp != &createdResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ eid1 := i2s(ieid1)
+ vars[urlEpName] = "ep2"
+ ieid2, errRsp := procCreateEndpoint(c, vars, b2)
+ if errRsp != &createdResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ eid2 := i2s(ieid2)
+
+ vars[urlNwName] = ""
+ vars[urlEpName] = "ep1"
+ _, errRsp = procGetEndpoint(c, vars, nil)
+ if errRsp == &successResponse {
+ t.Fatalf("Expected failure but succeeded: %v", errRsp)
+ }
+ if errRsp.StatusCode != http.StatusBadRequest {
+ t.Fatalf("Expected to fail with http.StatusBadRequest, but got: %d", errRsp.StatusCode)
+ }
+
+ vars = make(map[string]string)
+ vars[urlNwName] = "sh"
+ vars[urlEpID] = ""
+ _, errRsp = procGetEndpoint(c, vars, nil)
+ if errRsp == &successResponse {
+ t.Fatalf("Expected failure but succeeded: %v", errRsp)
+ }
+ if errRsp.StatusCode != http.StatusBadRequest {
+ t.Fatalf("Expected to fail with http.StatusBadRequest, but got: %d", errRsp.StatusCode)
+ }
+
+ vars = make(map[string]string)
+ vars[urlNwID] = ""
+ vars[urlEpID] = eid1
+ _, errRsp = procGetEndpoint(c, vars, nil)
+ if errRsp == &successResponse {
+ t.Fatalf("Expected failure but succeeded: %v", errRsp)
+ }
+ if errRsp.StatusCode != http.StatusBadRequest {
+ t.Fatalf("Expected to fail with http.StatusBadRequest, but got: %d", errRsp.StatusCode)
+ }
+
+ // nw by name and ep by id
+ vars[urlNwName] = "sh"
+ i1, errRsp := procGetEndpoint(c, vars, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ // nw by name and ep by name
+ delete(vars, urlEpID)
+ vars[urlEpName] = "ep1"
+ i2, errRsp := procGetEndpoint(c, vars, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ // nw by id and ep by name
+ delete(vars, urlNwName)
+ vars[urlNwID] = nid
+ i3, errRsp := procGetEndpoint(c, vars, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ // nw by id and ep by id
+ delete(vars, urlEpName)
+ vars[urlEpID] = eid1
+ i4, errRsp := procGetEndpoint(c, vars, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+
+ id1 := i2e(i1).ID
+ if id1 != i2e(i2).ID || id1 != i2e(i3).ID || id1 != i2e(i4).ID {
+ t.Fatalf("Endpoints retireved via different query parameters differ: %v, %v, %v, %v", i1, i2, i3, i4)
+ }
+
+ vars[urlNwName] = ""
+ _, errRsp = procGetEndpoints(c, vars, nil)
+ if errRsp == &successResponse {
+ t.Fatalf("Expected failure, got: %v", errRsp)
+ }
+
+ delete(vars, urlNwName)
+ vars[urlNwID] = "fakeID"
+ _, errRsp = procGetEndpoints(c, vars, nil)
+ if errRsp == &successResponse {
+ t.Fatalf("Expected failure, got: %v", errRsp)
+ }
+
+ vars[urlNwID] = nid
+ _, errRsp = procGetEndpoints(c, vars, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+
+ vars[urlNwName] = "sh"
+ iepList, errRsp := procGetEndpoints(c, vars, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ epList := i2eL(iepList)
+ if len(epList) != 2 {
+ t.Fatalf("Did not return the expected number (2) of endpoint resources: %d", len(epList))
+ }
+ if "sh" != epList[0].Network || "sh" != epList[1].Network {
+ t.Fatalf("Did not find expected network name in endpoint resources")
+ }
+
+ vars = make(map[string]string)
+ vars[urlNwName] = ""
+ _, errRsp = procGetNetwork(c, vars, nil)
+ if errRsp == &successResponse {
+ t.Fatalf("Exepected failure, got: %v", errRsp)
+ }
+ vars[urlNwName] = "shhhhh"
+ _, errRsp = procGetNetwork(c, vars, nil)
+ if errRsp == &successResponse {
+ t.Fatalf("Exepected failure, got: %v", errRsp)
+ }
+ vars[urlNwName] = "sh"
+ inr1, errRsp := procGetNetwork(c, vars, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ nr1 := i2n(inr1)
+
+ delete(vars, urlNwName)
+ vars[urlNwID] = "cacca"
+ _, errRsp = procGetNetwork(c, vars, nil)
+ if errRsp == &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ vars[urlNwID] = nid
+ inr2, errRsp := procGetNetwork(c, vars, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("procgetNetworkByName() != procgetNetworkById(), %v vs %v", inr1, inr2)
+ }
+ nr2 := i2n(inr2)
+ if nr1.Name != nr2.Name || nr1.Type != nr2.Type || nr1.ID != nr2.ID || len(nr1.Endpoints) != len(nr2.Endpoints) {
+ t.Fatalf("Get by name and Get failure: %v", errRsp)
+ }
+
+ if len(nr1.Endpoints) != 2 {
+ t.Fatalf("Did not find the expected number (2) of endpoint resources in the network resource: %d", len(nr1.Endpoints))
+ }
+ for _, er := range nr1.Endpoints {
+ if er.ID != eid1 && er.ID != eid2 {
+ t.Fatalf("Did not find the expected endpoint resources in the network resource: %v", nr1.Endpoints)
+ }
+ }
+
+ iList, errRsp := procGetNetworks(c, nil, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ netList := i2nL(iList)
+ if len(netList) != 1 {
+ t.Fatalf("Did not return the expected number of network resources")
+ }
+ if nid != netList[0].ID {
+ t.Fatalf("Did not find expected network %s: %v", nid, netList)
+ }
+
+ _, errRsp = procDeleteNetwork(c, vars, nil)
+ if errRsp == &successResponse {
+ t.Fatalf("Exepected failure, got: %v", errRsp)
+ }
+
+ vars[urlEpName] = "ep1"
+ _, errRsp = procDeleteEndpoint(c, vars, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ delete(vars, urlEpName)
+ iepList, errRsp = procGetEndpoints(c, vars, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ epList = i2eL(iepList)
+ if len(epList) != 1 {
+ t.Fatalf("Did not return the expected number (1) of endpoint resources: %d", len(epList))
+ }
+
+ vars[urlEpName] = "ep2"
+ _, errRsp = procDeleteEndpoint(c, vars, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ iepList, errRsp = procGetEndpoints(c, vars, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ epList = i2eL(iepList)
+ if len(epList) != 0 {
+ t.Fatalf("Did not return the expected number (0) of endpoint resources: %d", len(epList))
+ }
+
+ _, errRsp = procDeleteNetwork(c, vars, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+
+ iList, errRsp = procGetNetworks(c, nil, nil)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexepected failure: %v", errRsp)
+ }
+ netList = i2nL(iList)
+ if len(netList) != 0 {
+ t.Fatalf("Did not return the expected number of network resources")
+ }
+}
+
+func TestDetectGetNetworksInvalidQueryComposition(t *testing.T) {
+ c, err := libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ vars := map[string]string{urlNwName: "x", urlNwPID: "y"}
+ _, errRsp := procGetNetworks(c, vars, nil)
+ if errRsp.StatusCode != http.StatusBadRequest {
+ t.Fatalf("Expected %d. Got: %v", http.StatusBadRequest, errRsp)
+ }
+}
+
+func TestDetectGetEndpointsInvalidQueryComposition(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ c, err := libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = c.ConfigureNetworkDriver(bridgeNetType, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = c.NewNetwork(bridgeNetType, "network", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ vars := map[string]string{urlNwName: "network", urlEpName: "x", urlEpPID: "y"}
+ _, errRsp := procGetEndpoints(c, vars, nil)
+ if errRsp.StatusCode != http.StatusBadRequest {
+ t.Fatalf("Expected %d. Got: %v", http.StatusBadRequest, errRsp)
+ }
+}
+
+func TestFindNetworkUtil(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ c, err := libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = c.ConfigureNetworkDriver(bridgeNetType, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ nw, err := c.NewNetwork(bridgeNetType, "network", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ nid := nw.ID()
+
+ defer checkPanic(t)
+ findNetwork(c, "", -1)
+
+ _, errRsp := findNetwork(c, "", byName)
+ if errRsp == &successResponse {
+ t.Fatalf("Expected to fail but succeeded")
+ }
+ if errRsp.StatusCode != http.StatusBadRequest {
+ t.Fatalf("Expected %d, but got: %d", http.StatusBadRequest, errRsp.StatusCode)
+ }
+
+ n, errRsp := findNetwork(c, nid, byID)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexpected failure: %v", errRsp)
+ }
+ if n == nil {
+ t.Fatalf("Unexpected nil libnetwork.Network")
+ }
+ if nid != n.ID() {
+ t.Fatalf("Incorrect libnetwork.Network resource. It has different id: %v", n)
+ }
+ if "network" != n.Name() {
+ t.Fatalf("Incorrect libnetwork.Network resource. It has different name: %v", n)
+ }
+
+ n, errRsp = findNetwork(c, "network", byName)
+ if errRsp != &successResponse {
+ t.Fatalf("Unexpected failure: %v", errRsp)
+ }
+ if n == nil {
+ t.Fatalf("Unexpected nil libnetwork.Network")
+ }
+ if nid != n.ID() {
+ t.Fatalf("Incorrect libnetwork.Network resource. It has different id: %v", n)
+ }
+ if "network" != n.Name() {
+ t.Fatalf("Incorrect libnetwork.Network resource. It has different name: %v", n)
+ }
+
+ n.Delete()
+
+ _, errRsp = findNetwork(c, nid, byID)
+ if errRsp == &successResponse {
+ t.Fatalf("Expected to fail but succeeded")
+ }
+ if errRsp.StatusCode != http.StatusNotFound {
+ t.Fatalf("Expected %d, but got: %d", http.StatusNotFound, errRsp.StatusCode)
+ }
+
+ _, errRsp = findNetwork(c, "network", byName)
+ if errRsp == &successResponse {
+ t.Fatalf("Expected to fail but succeeded")
+ }
+ if errRsp.StatusCode != http.StatusNotFound {
+ t.Fatalf("Expected %d, but got: %d", http.StatusNotFound, errRsp.StatusCode)
+ }
+}
+
+func TestCreateDeleteEndpoints(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = c.ConfigureNetworkDriver(bridgeNetType, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	nc := networkCreate{Name: "firstNet", NetworkType: bridgeNetType}
+	body, err := json.Marshal(nc)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vars := make(map[string]string)
+	i, errRsp := procCreateNetwork(c, vars, body)
+	if errRsp != &createdResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+	nid := i2s(i)
+
+	vbad, err := json.Marshal("bad endppoint create data")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vars[urlNwName] = "firstNet"
+	_, errRsp = procCreateEndpoint(c, vars, vbad)
+	if errRsp == &createdResponse {
+		t.Fatalf("Expected to fail but succeeded")
+	}
+
+	b, err := json.Marshal(endpointCreate{Name: ""})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vars[urlNwName] = "secondNet"
+	_, errRsp = procCreateEndpoint(c, vars, b)
+	if errRsp == &createdResponse {
+		t.Fatalf("Expected to fail but succeeded")
+	}
+
+	vars[urlNwName] = "firstNet"
+	_, errRsp = procCreateEndpoint(c, vars, b)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure but succeeded: %v", errRsp)
+	}
+
+	b, err = json.Marshal(endpointCreate{Name: "firstEp"})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	i, errRsp = procCreateEndpoint(c, vars, b)
+	if errRsp != &createdResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+	eid := i2s(i)
+
+	_, errRsp = findEndpoint(c, "myNet", "firstEp", byName, byName)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure but succeeded: %v", errRsp)
+	}
+
+	ep0, errRsp := findEndpoint(c, nid, "firstEp", byID, byName)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+
+	ep1, errRsp := findEndpoint(c, "firstNet", "firstEp", byName, byName)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+
+	ep2, errRsp := findEndpoint(c, nid, eid, byID, byID)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+
+	ep3, errRsp := findEndpoint(c, "firstNet", eid, byName, byID)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+
+	if ep0.ID() != ep1.ID() || ep0.ID() != ep2.ID() || ep0.ID() != ep3.ID() {
+		t.Fatalf("Different queries returned different endpoints: \nep0: %v\nep1: %v\nep2: %v\nep3: %v", ep0, ep1, ep2, ep3)
+	}
+
+	vars = make(map[string]string)
+	vars[urlNwName] = ""
+	vars[urlEpName] = "ep1"
+	_, errRsp = procDeleteEndpoint(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlNwName] = "firstNet"
+	vars[urlEpName] = ""
+	_, errRsp = procDeleteEndpoint(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlEpName] = "ep2"
+	_, errRsp = procDeleteEndpoint(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlEpName] = "firstEp"
+	_, errRsp = procDeleteEndpoint(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+
+	_, errRsp = findEndpoint(c, "firstNet", "firstEp", byName, byName)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+}
+
+func TestJoinLeave(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = c.ConfigureNetworkDriver(bridgeNetType, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	nb, err := json.Marshal(networkCreate{Name: "network", NetworkType: bridgeNetType})
+	if err != nil {
+		t.Fatal(err)
+	}
+	vars := make(map[string]string)
+	_, errRsp := procCreateNetwork(c, vars, nb)
+	if errRsp != &createdResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+
+	eb, err := json.Marshal(endpointCreate{Name: "endpoint"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	vars[urlNwName] = "network"
+	_, errRsp = procCreateEndpoint(c, vars, eb)
+	if errRsp != &createdResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+
+	vbad, err := json.Marshal("bad data")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, errRsp = procJoinEndpoint(c, vars, vbad)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlEpName] = "endpoint"
+	bad, err := json.Marshal(endpointJoin{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, errRsp = procJoinEndpoint(c, vars, bad)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	cid := "abcdefghi"
+	jl := endpointJoin{ContainerID: cid}
+	jlb, err := json.Marshal(jl)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vars = make(map[string]string)
+	vars[urlNwName] = ""
+	vars[urlEpName] = ""
+	_, errRsp = procJoinEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlNwName] = "network"
+	vars[urlEpName] = ""
+	_, errRsp = procJoinEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlEpName] = "epoint"
+	_, errRsp = procJoinEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlEpName] = "endpoint"
+	cdi, errRsp := procJoinEndpoint(c, vars, jlb)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+
+	cd := i2c(cdi)
+	if cd.SandboxKey == "" {
+		t.Fatalf("Empty sandbox key")
+	}
+	_, errRsp = procDeleteEndpoint(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlNwName] = "network2"
+	_, errRsp = procLeaveEndpoint(c, vars, vbad)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+	_, errRsp = procLeaveEndpoint(c, vars, bad)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+	vars = make(map[string]string)
+	vars[urlNwName] = ""
+	vars[urlEpName] = ""
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+	vars[urlNwName] = "network"
+	vars[urlEpName] = ""
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+	vars[urlEpName] = "2epoint"
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+	vars[urlEpName] = "epoint"
+	vars[urlCnID] = "who"
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	delete(vars, urlCnID)
+	vars[urlEpName] = "endpoint"
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlCnID] = cid
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	_, errRsp = procDeleteEndpoint(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+}
+
+func TestFindEndpointUtil(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = c.ConfigureNetworkDriver(bridgeNetType, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	nw, err := c.NewNetwork(bridgeNetType, "second", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	nid := nw.ID()
+
+	ep, err := nw.CreateEndpoint("secondEp", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	eid := ep.ID()
+
+	defer checkPanic(t)
+	findEndpoint(c, nid, "", byID, -1)
+
+	_, errRsp := findEndpoint(c, nid, "", byID, byName)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, but got: %v", errRsp)
+	}
+	if errRsp.StatusCode != http.StatusBadRequest {
+		t.Fatalf("Expected %d, but got: %d", http.StatusBadRequest, errRsp.StatusCode)
+	}
+
+	ep0, errRsp := findEndpoint(c, nid, "secondEp", byID, byName)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+
+	ep1, errRsp := findEndpoint(c, "second", "secondEp", byName, byName)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+
+	ep2, errRsp := findEndpoint(c, nid, eid, byID, byID)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+
+	ep3, errRsp := findEndpoint(c, "second", eid, byName, byID)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+
+	if ep0 != ep1 || ep0 != ep2 || ep0 != ep3 {
+		t.Fatalf("Different queries returned different endpoints")
+	}
+
+	ep.Delete()
+
+	_, errRsp = findEndpoint(c, nid, "secondEp", byID, byName)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, but got: %v", errRsp)
+	}
+	if errRsp.StatusCode != http.StatusNotFound {
+		t.Fatalf("Expected %d, but got: %d", http.StatusNotFound, errRsp.StatusCode)
+	}
+
+	_, errRsp = findEndpoint(c, "second", "secondEp", byName, byName)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, but got: %v", errRsp)
+	}
+	if errRsp.StatusCode != http.StatusNotFound {
+		t.Fatalf("Expected %d, but got: %d", http.StatusNotFound, errRsp.StatusCode)
+	}
+
+	_, errRsp = findEndpoint(c, nid, eid, byID, byID)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, but got: %v", errRsp)
+	}
+	if errRsp.StatusCode != http.StatusNotFound {
+		t.Fatalf("Expected %d, but got: %d", http.StatusNotFound, errRsp.StatusCode)
+	}
+
+	_, errRsp = findEndpoint(c, "second", eid, byName, byID)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, but got: %v", errRsp)
+	}
+	if errRsp.StatusCode != http.StatusNotFound {
+		t.Fatalf("Expected %d, but got: %d", http.StatusNotFound, errRsp.StatusCode)
+	}
+}
+
+func checkPanic(t *testing.T) {
+	if r := recover(); r != nil {
+		if _, ok := r.(runtime.Error); ok {
+			panic(r)
+		}
+	} else {
+		t.Fatalf("Expected to panic, but succeeded")
+	}
+}
+
+func TestDetectNetworkTargetPanic(t *testing.T) {
+ defer checkPanic(t)
+ vars := make(map[string]string)
+ detectNetworkTarget(vars)
+}
+
+func TestDetectEndpointTargetPanic(t *testing.T) {
+ defer checkPanic(t)
+ vars := make(map[string]string)
+ detectEndpointTarget(vars)
+}
+
+func TestResponseStatus(t *testing.T) {
+	list := []int{
+		http.StatusBadGateway,
+		http.StatusBadRequest,
+		http.StatusConflict,
+		http.StatusContinue,
+		http.StatusExpectationFailed,
+		http.StatusForbidden,
+		http.StatusFound,
+		http.StatusGatewayTimeout,
+		http.StatusGone,
+		http.StatusHTTPVersionNotSupported,
+		http.StatusInternalServerError,
+		http.StatusLengthRequired,
+		http.StatusMethodNotAllowed,
+		http.StatusMovedPermanently,
+		http.StatusMultipleChoices,
+		http.StatusNoContent,
+		http.StatusNonAuthoritativeInfo,
+		http.StatusNotAcceptable,
+		http.StatusNotFound,
+		http.StatusNotModified,
+		http.StatusPartialContent,
+		http.StatusPaymentRequired,
+		http.StatusPreconditionFailed,
+		http.StatusProxyAuthRequired,
+		http.StatusRequestEntityTooLarge,
+		http.StatusRequestTimeout,
+		http.StatusRequestURITooLong,
+		http.StatusRequestedRangeNotSatisfiable,
+		http.StatusResetContent,
+		http.StatusServiceUnavailable,
+		http.StatusSwitchingProtocols,
+		http.StatusTemporaryRedirect,
+		http.StatusUnauthorized,
+		http.StatusUnsupportedMediaType,
+		http.StatusUseProxy,
+	}
+	for _, c := range list {
+		r := responseStatus{StatusCode: c}
+		if r.isOK() {
+			t.Fatalf("isOK() returned true for code %d", c)
+		}
+	}
+
+	r := responseStatus{StatusCode: http.StatusOK}
+	if !r.isOK() {
+		t.Fatalf("isOK() failed")
+	}
+
+	r = responseStatus{StatusCode: http.StatusCreated}
+	if !r.isOK() {
+		t.Fatalf("isOK() failed")
+	}
+}
+
+// Local structs for end to end testing of api.go
+type localReader struct {
+ data []byte
+ beBad bool
+}
+
+func newLocalReader(data []byte) *localReader {
+ lr := &localReader{data: make([]byte, len(data))}
+ copy(lr.data, data)
+ return lr
+}
+
+func (l *localReader) Read(p []byte) (n int, err error) {
+ if l.beBad {
+ return 0, errors.New("I am a bad reader")
+ }
+ if p == nil {
+ return -1, fmt.Errorf("nil buffer passed")
+ }
+ if l.data == nil || len(l.data) == 0 {
+ return 0, io.EOF
+ }
+ copy(p[:], l.data[:])
+ return len(l.data), io.EOF
+}
+
+type localResponseWriter struct {
+ body []byte
+ statusCode int
+}
+
+func newWriter() *localResponseWriter {
+ return &localResponseWriter{}
+}
+
+func (f *localResponseWriter) Header() http.Header {
+ return make(map[string][]string, 0)
+}
+
+func (f *localResponseWriter) Write(data []byte) (int, error) {
+ if data == nil {
+ return -1, fmt.Errorf("nil data passed")
+ }
+
+ f.body = make([]byte, len(data))
+ copy(f.body, data)
+
+ return len(f.body), nil
+}
+
+func (f *localResponseWriter) WriteHeader(c int) {
+ f.statusCode = c
+}
+
+func TestwriteJSON(t *testing.T) {
+ testCode := 55
+ testData, err := json.Marshal("test data")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rsp := newWriter()
+ writeJSON(rsp, testCode, testData)
+ if rsp.statusCode != testCode {
+ t.Fatalf("writeJSON() failed to set the status code. Expected %d. Got %d", testCode, rsp.statusCode)
+ }
+ if !bytes.Equal(testData, rsp.body) {
+ t.Fatalf("writeJSON() failed to set the body. Expected %s. Got %s", testData, rsp.body)
+ }
+
+}
+
+func TestHttpHandlerUninit(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	h := &httpHandler{c: c}
+	h.initRouter()
+	if h.r == nil {
+		t.Fatalf("initRouter() did not initialize the router")
+	}
+
+	rsp := newWriter()
+	req, err := http.NewRequest("GET", "/v1.19/networks", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	handleRequest := NewHTTPHandler(nil)
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusServiceUnavailable {
+		t.Fatalf("Expected (%d). Got (%d): %s", http.StatusServiceUnavailable, rsp.statusCode, rsp.body)
+	}
+
+	handleRequest = NewHTTPHandler(c)
+
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Expected (%d). Got: (%d): %s", http.StatusOK, rsp.statusCode, rsp.body)
+	}
+
+	var list []*networkResource
+	err = json.Unmarshal(rsp.body, &list)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(list) != 0 {
+		t.Fatalf("Expected empty list. Got %v", list)
+	}
+
+	n, err := c.NewNetwork(bridgeNetType, "didietro", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	nwr := buildNetworkResource(n)
+	expected, err := json.Marshal([]*networkResource{nwr})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Unexpected failure: (%d): %s", rsp.statusCode, rsp.body)
+	}
+	if len(rsp.body) == 0 {
+		t.Fatalf("Empty list of networks")
+	}
+	if !bytes.Equal(rsp.body, expected) {
+		t.Fatalf("Incorrect list of networks in response's body")
+	}
+}
+
+func TestHttpHandlerBadBody(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ rsp := newWriter()
+
+ c, err := libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest := NewHTTPHandler(c)
+
+ req, err := http.NewRequest("POST", "/v1.19/networks", &localReader{beBad: true})
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusBadRequest {
+ t.Fatalf("Unexpected status code. Expected (%d). Got (%d): %s.", http.StatusBadRequest, rsp.statusCode, string(rsp.body))
+ }
+
+ body := []byte{}
+ lr := newLocalReader(body)
+ req, err = http.NewRequest("POST", "/v1.19/networks", lr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusBadRequest {
+ t.Fatalf("Unexpected status code. Expected (%d). Got (%d): %s.", http.StatusBadRequest, rsp.statusCode, string(rsp.body))
+ }
+}
+
+func TestEndToEnd(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ rsp := newWriter()
+
+ c, err := libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest := NewHTTPHandler(c)
+
+ // Create network
+ nc := networkCreate{Name: "network-fiftyfive", NetworkType: bridgeNetType}
+ body, err := json.Marshal(nc)
+ if err != nil {
+ t.Fatal(err)
+ }
+ lr := newLocalReader(body)
+ req, err := http.NewRequest("POST", "/v1.19/networks", lr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusCreated {
+ t.Fatalf("Unexpectded status code. Expected (%d). Got (%d): %s.", http.StatusCreated, rsp.statusCode, string(rsp.body))
+ }
+ if len(rsp.body) == 0 {
+ t.Fatalf("Empty response body")
+ }
+
+ var nid string
+ err = json.Unmarshal(rsp.body, &nid)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Query networks collection
+ req, err = http.NewRequest("GET", "/v1.19/networks", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusOK {
+ t.Fatalf("Expected StatusOK. Got (%d): %s", rsp.statusCode, rsp.body)
+ }
+
+ b0 := make([]byte, len(rsp.body))
+ copy(b0, rsp.body)
+
+ req, err = http.NewRequest("GET", "/v1.19/networks?name=network-fiftyfive", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusOK {
+ t.Fatalf("Expected StatusOK. Got (%d): %s", rsp.statusCode, rsp.body)
+ }
+
+ if !bytes.Equal(b0, rsp.body) {
+ t.Fatalf("Expected same body from GET /networks and GET /networks?name= when only network exist.")
+ }
+
+ // Query network by name
+ req, err = http.NewRequest("GET", "/v1.19/networks?name=culo", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusOK {
+ t.Fatalf("Expected StatusOK. Got (%d): %s", rsp.statusCode, rsp.body)
+ }
+
+ var list []*networkResource
+ err = json.Unmarshal(rsp.body, &list)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(list) != 0 {
+ t.Fatalf("Expected empty list. Got %v", list)
+ }
+
+ req, err = http.NewRequest("GET", "/v1.19/networks?name=network-fiftyfive", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusOK {
+ t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+ }
+
+ err = json.Unmarshal(rsp.body, &list)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(list) == 0 {
+ t.Fatalf("Expected non empty list")
+ }
+ if list[0].Name != "network-fiftyfive" || nid != list[0].ID {
+ t.Fatalf("Incongruent resource found: %v", list[0])
+ }
+
+ // Query network by partial id
+ chars := []byte(nid)
+ partial := string(chars[0 : len(chars)/2])
+ req, err = http.NewRequest("GET", "/v1.19/networks?partial-id="+partial, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusOK {
+ t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+ }
+
+ err = json.Unmarshal(rsp.body, &list)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(list) == 0 {
+ t.Fatalf("Expected non empty list")
+ }
+ if list[0].Name != "network-fiftyfive" || nid != list[0].ID {
+ t.Fatalf("Incongruent resource found: %v", list[0])
+ }
+
+ // Get network by id
+ req, err = http.NewRequest("GET", "/v1.19/networks/"+nid, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusOK {
+ t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+ }
+
+ var nwr networkResource
+ err = json.Unmarshal(rsp.body, &nwr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if nwr.Name != "network-fiftyfive" || nid != nwr.ID {
+ t.Fatalf("Incongruent resource found: %v", nwr)
+ }
+
+ // Create endpoint
+ eb, err := json.Marshal(endpointCreate{Name: "ep-TwentyTwo"})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ lr = newLocalReader(eb)
+ req, err = http.NewRequest("POST", "/v1.19/networks/"+nid+"/endpoints", lr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusCreated {
+ t.Fatalf("Unexpectded status code. Expected (%d). Got (%d): %s.", http.StatusCreated, rsp.statusCode, string(rsp.body))
+ }
+ if len(rsp.body) == 0 {
+ t.Fatalf("Empty response body")
+ }
+
+ var eid string
+ err = json.Unmarshal(rsp.body, &eid)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Query endpoint(s)
+ req, err = http.NewRequest("GET", "/v1.19/networks/"+nid+"/endpoints", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusOK {
+ t.Fatalf("Expected StatusOK. Got (%d): %s", rsp.statusCode, rsp.body)
+ }
+
+ req, err = http.NewRequest("GET", "/v1.19/networks/"+nid+"/endpoints?name=bla", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusOK {
+ t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+ }
+ var epList []*endpointResource
+ err = json.Unmarshal(rsp.body, &epList)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(epList) != 0 {
+ t.Fatalf("Expected empty list. Got %v", epList)
+ }
+
+ // Query endpoint by name
+ req, err = http.NewRequest("GET", "/v1.19/networks/"+nid+"/endpoints?name=ep-TwentyTwo", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusOK {
+ t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+ }
+
+ err = json.Unmarshal(rsp.body, &epList)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(epList) == 0 {
+ t.Fatalf("Empty response body")
+ }
+ if epList[0].Name != "ep-TwentyTwo" || eid != epList[0].ID {
+ t.Fatalf("Incongruent resource found: %v", epList[0])
+ }
+
+ // Query endpoint by partial id
+ chars = []byte(eid)
+ partial = string(chars[0 : len(chars)/2])
+ req, err = http.NewRequest("GET", "/v1.19/networks/"+nid+"/endpoints?partial-id="+partial, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusOK {
+ t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+ }
+
+ err = json.Unmarshal(rsp.body, &epList)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(epList) == 0 {
+ t.Fatalf("Empty response body")
+ }
+ if epList[0].Name != "ep-TwentyTwo" || eid != epList[0].ID {
+ t.Fatalf("Incongruent resource found: %v", epList[0])
+ }
+
+ // Get endpoint by id
+ req, err = http.NewRequest("GET", "/v1.19/networks/"+nid+"/endpoints/"+eid, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ handleRequest(rsp, req)
+ if rsp.statusCode != http.StatusOK {
+ t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+ }
+
+ var epr endpointResource
+ err = json.Unmarshal(rsp.body, &epr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if epr.Name != "ep-TwentyTwo" || epr.ID != eid {
+ t.Fatalf("Incongruent resource found: %v", epr)
+ }
+}
+
+type bre struct{}
+
+func (b *bre) Error() string {
+ return "I am a bad request error"
+}
+func (b *bre) BadRequest() {}
+
+type nfe struct{}
+
+func (n *nfe) Error() string {
+ return "I am a not found error"
+}
+func (n *nfe) NotFound() {}
+
+type forb struct{}
+
+func (f *forb) Error() string {
+ return "I am a bad request error"
+}
+func (f *forb) Forbidden() {}
+
+type notimpl struct{}
+
+func (nip *notimpl) Error() string {
+ return "I am a not implemented error"
+}
+func (nip *notimpl) NotImplemented() {}
+
+type inter struct{}
+
+func (it *inter) Error() string {
+ return "I am a internal error"
+}
+func (it *inter) Internal() {}
+
+type tout struct{}
+
+func (to *tout) Error() string {
+ return "I am a timeout error"
+}
+func (to *tout) Timeout() {}
+
+type noserv struct{}
+
+func (nos *noserv) Error() string {
+ return "I am a no service error"
+}
+func (nos *noserv) NoService() {}
+
+type notclassified struct{}
+
+func (noc *notclassified) Error() string {
+ return "I am a non classified error"
+}
+
+func TestErrorConversion(t *testing.T) {
+ if convertNetworkError(new(bre)).StatusCode != http.StatusBadRequest {
+ t.Fatalf("Failed to recognize BadRequest error")
+ }
+
+ if convertNetworkError(new(nfe)).StatusCode != http.StatusNotFound {
+ t.Fatalf("Failed to recognize NotFound error")
+ }
+
+ if convertNetworkError(new(forb)).StatusCode != http.StatusForbidden {
+ t.Fatalf("Failed to recognize Forbidden error")
+ }
+
+ if convertNetworkError(new(notimpl)).StatusCode != http.StatusNotImplemented {
+ t.Fatalf("Failed to recognize NotImplemented error")
+ }
+
+ if convertNetworkError(new(inter)).StatusCode != http.StatusInternalServerError {
+ t.Fatalf("Failed to recognize Internal error")
+ }
+
+ if convertNetworkError(new(tout)).StatusCode != http.StatusRequestTimeout {
+ t.Fatalf("Failed to recognize Timeout error")
+ }
+
+ if convertNetworkError(new(noserv)).StatusCode != http.StatusServiceUnavailable {
+ t.Fatalf("Failed to recognize No Service error")
+ }
+
+ if convertNetworkError(new(notclassified)).StatusCode != http.StatusInternalServerError {
+ t.Fatalf("Failed to recognize not classified error as Internal error")
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/api/types.go b/vendor/src/github.com/docker/libnetwork/api/types.go
new file mode 100644
index 0000000000..2490a84fe4
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/api/types.go
@@ -0,0 +1,67 @@
+package api
+
+import "github.com/docker/libnetwork/types"
+
+/***********
+ Resources
+************/
+
+// networkResource is the body of the "get network" http response message
+type networkResource struct {
+ Name string
+ ID string
+ Type string
+ Endpoints []*endpointResource
+}
+
+// endpointResource is the body of the "get endpoint" http response message
+type endpointResource struct {
+ Name string
+ ID string
+ Network string
+}
+
+/***********
+ Body types
+ ************/
+
+// networkCreate is the expected body of the "create network" http request message
+type networkCreate struct {
+ Name string
+ NetworkType string
+ Options map[string]interface{}
+}
+
+// endpointCreate represents the body of the "create endpoint" http request message
+type endpointCreate struct {
+ Name string
+ ExposedPorts []types.TransportPort
+ PortMapping []types.PortBinding
+}
+
+// endpointJoin represents the expected body of the "join endpoint" or "leave endpoint" http request messages
+type endpointJoin struct {
+ ContainerID string
+ HostName string
+ DomainName string
+ HostsPath string
+ ResolvConfPath string
+ DNS []string
+ ExtraHosts []endpointExtraHost
+ ParentUpdates []endpointParentUpdate
+ UseDefaultSandbox bool
+}
+
+// endpointExtraHost represents the extra host object
+type endpointExtraHost struct {
+	Name    string
+	Address string
+}
+
+// endpointParentUpdate is the object carrying the information about the
+// endpoint parent that needs to be updated
+type endpointParentUpdate struct {
+	EndpointID string
+	Name       string
+	Address    string
+}
diff --git a/vendor/src/github.com/docker/libnetwork/circle.yml b/vendor/src/github.com/docker/libnetwork/circle.yml
new file mode 100644
index 0000000000..d02f6a92ed
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/circle.yml
@@ -0,0 +1,12 @@
+machine:
+ services:
+ - docker
+
+dependencies:
+ override:
+ - echo "Nothing to install"
+
+test:
+ override:
+ - make circle-ci
+
diff --git a/vendor/src/github.com/docker/libnetwork/client/client.go b/vendor/src/github.com/docker/libnetwork/client/client.go
new file mode 100644
index 0000000000..4bc86da4be
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/client/client.go
@@ -0,0 +1,111 @@
+package client
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "reflect"
+ "strings"
+
+ flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CallFunc provides environment specific call utility to invoke backend functions from UI
+type CallFunc func(string, string, interface{}, map[string][]string) (io.ReadCloser, int, error)
+
+// NetworkCli is the UI object for network subcmds
+type NetworkCli struct {
+ out io.Writer
+ err io.Writer
+ call CallFunc
+}
+
+// NewNetworkCli is a convenient function to create a NetworkCli object
+func NewNetworkCli(out, err io.Writer, call CallFunc) *NetworkCli {
+ return &NetworkCli{
+ out: out,
+ err: err,
+ call: call,
+ }
+}
+
+// getMethod is Borrowed from Docker UI which uses reflection to identify the UI Handler
+func (cli *NetworkCli) getMethod(args ...string) (func(string, ...string) error, bool) {
+ camelArgs := make([]string, len(args))
+ for i, s := range args {
+ if len(s) == 0 {
+ return nil, false
+ }
+ camelArgs[i] = strings.ToUpper(s[:1]) + strings.ToLower(s[1:])
+ }
+ methodName := "Cmd" + strings.Join(camelArgs, "")
+ method := reflect.ValueOf(cli).MethodByName(methodName)
+ if !method.IsValid() {
+ return nil, false
+ }
+ return method.Interface().(func(string, ...string) error), true
+}
+
+// Cmd is borrowed from Docker UI and acts as the entry point for network UI commands.
+// network UI commands are designed to be invoked from multiple parent chains
+func (cli *NetworkCli) Cmd(chain string, args ...string) error {
+ if len(args) > 2 {
+ method, exists := cli.getMethod(args[:3]...)
+ if exists {
+ return method(chain+" "+args[0]+" "+args[1], args[3:]...)
+ }
+ }
+ if len(args) > 1 {
+ method, exists := cli.getMethod(args[:2]...)
+ if exists {
+ return method(chain+" "+args[0], args[2:]...)
+ }
+ }
+ if len(args) > 0 {
+ method, exists := cli.getMethod(args[0])
+ if !exists {
+ return fmt.Errorf("%s: '%s' is not a %s command. See '%s --help'.\n", chain, args[0], chain, chain)
+ }
+ return method(chain, args[1:]...)
+ }
+ flag.Usage()
+ return nil
+}
+
+// Subcmd is borrowed from Docker UI and performs the same function of configuring the subCmds
+func (cli *NetworkCli) Subcmd(chain, name, signature, description string, exitOnError bool) *flag.FlagSet {
+ var errorHandling flag.ErrorHandling
+ if exitOnError {
+ errorHandling = flag.ExitOnError
+ } else {
+ errorHandling = flag.ContinueOnError
+ }
+ flags := flag.NewFlagSet(name, errorHandling)
+ flags.Usage = func() {
+ options := ""
+ if signature != "" {
+ signature = " " + signature
+ }
+ if flags.FlagCountUndeprecated() > 0 {
+ options = " [OPTIONS]"
+ }
+ fmt.Fprintf(cli.out, "\nUsage: %s %s%s%s\n\n%s\n\n", chain, name, options, signature, description)
+ flags.SetOutput(cli.out)
+ flags.PrintDefaults()
+ }
+ return flags
+}
+
+func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
+ if stream != nil {
+ defer stream.Close()
+ }
+ if err != nil {
+ return nil, statusCode, err
+ }
+ body, err := ioutil.ReadAll(stream)
+ if err != nil {
+ return nil, -1, err
+ }
+ return body, statusCode, nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/client/client_experimental_test.go b/vendor/src/github.com/docker/libnetwork/client/client_experimental_test.go
new file mode 100644
index 0000000000..9592b3ca77
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/client/client_experimental_test.go
@@ -0,0 +1,124 @@
+// +build experimental
+
+package client
+
+import (
+ "bytes"
+ "testing"
+
+ _ "github.com/docker/libnetwork/netutils"
+)
+
+func TestClientNetworkServiceInvalidCommand(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "service", "invalid")
+ if err == nil {
+ t.Fatalf("Passing invalid commands must fail")
+ }
+}
+
+func TestClientNetworkServiceCreate(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "service", "create", mockServiceName, mockNwName)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+}
+
+func TestClientNetworkServiceRm(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "service", "rm", mockServiceName, mockNwName)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+}
+
+func TestClientNetworkServiceLs(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "service", "ls", mockNwName)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+}
+
+func TestClientNetworkServiceInfo(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "service", "info", mockServiceName, mockNwName)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+}
+
+func TestClientNetworkServiceInfoById(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "service", "info", mockServiceID, mockNwID)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+}
+
+func TestClientNetworkServiceJoin(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "service", "join", mockContainerID, mockServiceName, mockNwName)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+}
+
+func TestClientNetworkServiceLeave(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "service", "leave", mockContainerID, mockServiceName, mockNwName)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+}
+
+// Docker Flag processing in flag.go uses os.Exit() frequently, even for --help
+// TODO : Handle the --help test-case in the IT when CLI is available
+/*
+func TestClientNetworkServiceCreateHelp(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cFunc := func(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error) {
+ return nil, 0, nil
+ }
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "create", "--help")
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+}
+*/
+
+// Docker flag processing in flag.go uses os.Exit(1) for incorrect parameter case.
+// TODO : Handle the missing argument case in the IT when CLI is available
+/*
+func TestClientNetworkServiceCreateMissingArgument(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cFunc := func(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error) {
+ return nil, 0, nil
+ }
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "create")
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+}
+*/
diff --git a/vendor/src/github.com/docker/libnetwork/client/client_test.go b/vendor/src/github.com/docker/libnetwork/client/client_test.go
new file mode 100644
index 0000000000..3b2f3a8eb9
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/client/client_test.go
@@ -0,0 +1,212 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "testing"
+
+ _ "github.com/docker/libnetwork/netutils"
+)
+
+// nopCloser is used to provide a dummy CallFunc for Cmd()
+type nopCloser struct {
+ io.Reader
+}
+
+func (nopCloser) Close() error { return nil }
+
+func TestMain(m *testing.M) {
+ setupMockHTTPCallback()
+ os.Exit(m.Run())
+}
+
+var callbackFunc func(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error)
+var mockNwJSON, mockNwListJSON, mockServiceJSON, mockServiceListJSON []byte
+var mockNwName = "test"
+var mockNwID = "2a3456789"
+var mockServiceName = "testSrv"
+var mockServiceID = "2a3456789"
+var mockContainerID = "2a3456789"
+
+func setupMockHTTPCallback() {
+ var list []networkResource
+ nw := networkResource{Name: mockNwName, ID: mockNwID}
+ mockNwJSON, _ = json.Marshal(nw)
+ list = append(list, nw)
+ mockNwListJSON, _ = json.Marshal(list)
+
+ var srvList []endpointResource
+ ep := endpointResource{Name: mockServiceName, ID: mockServiceID, Network: mockNwName}
+ mockServiceJSON, _ = json.Marshal(ep)
+ srvList = append(srvList, ep)
+ mockServiceListJSON, _ = json.Marshal(srvList)
+
+ callbackFunc = func(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error) {
+ var rsp string
+ switch method {
+ case "GET":
+ if strings.Contains(path, fmt.Sprintf("networks?name=%s", mockNwName)) {
+ rsp = string(mockNwListJSON)
+ } else if strings.Contains(path, "networks?name=") {
+ rsp = "[]"
+ } else if strings.Contains(path, fmt.Sprintf("networks?partial-id=%s", mockNwID)) {
+ rsp = string(mockNwListJSON)
+ } else if strings.Contains(path, "networks?partial-id=") {
+ rsp = "[]"
+ } else if strings.HasSuffix(path, "networks") {
+ rsp = string(mockNwListJSON)
+ } else if strings.HasSuffix(path, "networks/"+mockNwID) {
+ rsp = string(mockNwJSON)
+ } else if strings.Contains(path, fmt.Sprintf("endpoints?name=%s", mockServiceName)) {
+ rsp = string(mockServiceListJSON)
+ } else if strings.Contains(path, "endpoints?name=") {
+ rsp = "[]"
+ } else if strings.Contains(path, fmt.Sprintf("endpoints?partial-id=%s", mockServiceID)) {
+ rsp = string(mockServiceListJSON)
+ } else if strings.Contains(path, "endpoints?partial-id=") {
+ rsp = "[]"
+ } else if strings.HasSuffix(path, "endpoints") {
+ rsp = string(mockServiceListJSON)
+ } else if strings.HasSuffix(path, "endpoints/"+mockServiceID) {
+ rsp = string(mockServiceJSON)
+ }
+ case "POST":
+ var data []byte
+ if strings.HasSuffix(path, "networks") {
+ data, _ = json.Marshal(mockNwID)
+ } else if strings.HasSuffix(path, "endpoints") {
+ data, _ = json.Marshal(mockServiceID)
+ } else if strings.HasSuffix(path, "containers") {
+ data, _ = json.Marshal(mockContainerID)
+ }
+ rsp = string(data)
+ case "PUT":
+ case "DELETE":
+ rsp = ""
+ }
+ return nopCloser{bytes.NewBufferString(rsp)}, 200, nil
+ }
+}
+
+func TestClientDummyCommand(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "dummy")
+ if err == nil {
+ t.Fatalf("Incorrect Command must fail")
+ }
+}
+
+func TestClientNetworkInvalidCommand(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "invalid")
+ if err == nil {
+ t.Fatalf("Passing invalid commands must fail")
+ }
+}
+
+func TestClientNetworkCreate(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "create", mockNwName)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+}
+
+func TestClientNetworkCreateWithDriver(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "create", "-f=dummy", mockNwName)
+ if err == nil {
+ t.Fatalf("Passing incorrect flags to the create command must fail")
+ }
+
+ err = cli.Cmd("docker", "network", "create", "-d=dummy", mockNwName)
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+}
+
+func TestClientNetworkRm(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "rm", mockNwName)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+}
+
+func TestClientNetworkLs(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "ls")
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+}
+
+func TestClientNetworkInfo(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "info", mockNwName)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+}
+
+func TestClientNetworkInfoById(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "info", mockNwID)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+}
+
+// Docker Flag processing in flag.go uses os.Exit() frequently, even for --help
+// TODO : Handle the --help test-case in the IT when CLI is available
+/*
+func TestClientNetworkServiceCreateHelp(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cFunc := func(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error) {
+ return nil, 0, nil
+ }
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "create", "--help")
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+}
+*/
+
+// Docker flag processing in flag.go uses os.Exit(1) for incorrect parameter case.
+// TODO : Handle the missing argument case in the IT when CLI is available
+/*
+func TestClientNetworkServiceCreateMissingArgument(t *testing.T) {
+ var out, errOut bytes.Buffer
+ cFunc := func(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error) {
+ return nil, 0, nil
+ }
+ cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+ err := cli.Cmd("docker", "network", "create")
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+}
+*/
diff --git a/vendor/src/github.com/docker/libnetwork/client/network.go b/vendor/src/github.com/docker/libnetwork/client/network.go
new file mode 100644
index 0000000000..4e0232941c
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/client/network.go
@@ -0,0 +1,241 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "text/tabwriter"
+
+ flag "github.com/docker/docker/pkg/mflag"
+ "github.com/docker/docker/pkg/stringid"
+)
+
+const (
+ nullNetType = "null"
+)
+
+type command struct {
+ name string
+ description string
+}
+
+var (
+ networkCommands = []command{
+ {"create", "Create a network"},
+ {"rm", "Remove a network"},
+ {"ls", "List all networks"},
+ {"info", "Display information of a network"},
+ }
+)
+
+// CmdNetwork handles the root Network UI
+func (cli *NetworkCli) CmdNetwork(chain string, args ...string) error {
+ cmd := cli.Subcmd(chain, "network", "COMMAND [OPTIONS] [arg...]", networkUsage(chain), false)
+ cmd.Require(flag.Min, 1)
+ err := cmd.ParseFlags(args, true)
+ if err == nil {
+ cmd.Usage()
+ return fmt.Errorf("invalid command : %v", args)
+ }
+ return err
+}
+
+// CmdNetworkCreate handles Network Create UI
+func (cli *NetworkCli) CmdNetworkCreate(chain string, args ...string) error {
+ cmd := cli.Subcmd(chain, "create", "NETWORK-NAME", "Creates a new network with a name specified by the user", false)
+ flDriver := cmd.String([]string{"d", "-driver"}, "null", "Driver to manage the Network")
+ cmd.Require(flag.Min, 1)
+ err := cmd.ParseFlags(args, true)
+ if err != nil {
+ return err
+ }
+ if *flDriver == "" {
+ *flDriver = nullNetType
+ }
+
+ nc := networkCreate{Name: cmd.Arg(0), NetworkType: *flDriver}
+
+ obj, _, err := readBody(cli.call("POST", "/networks", nc, nil))
+ if err != nil {
+ return err
+ }
+ var replyID string
+ err = json.Unmarshal(obj, &replyID)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(cli.out, "%s\n", replyID)
+ return nil
+}
+
+// CmdNetworkRm handles Network Delete UI
+func (cli *NetworkCli) CmdNetworkRm(chain string, args ...string) error {
+ cmd := cli.Subcmd(chain, "rm", "NETWORK", "Deletes a network", false)
+ cmd.Require(flag.Min, 1)
+ err := cmd.ParseFlags(args, true)
+ if err != nil {
+ return err
+ }
+ id, err := lookupNetworkID(cli, cmd.Arg(0))
+ if err != nil {
+ return err
+ }
+ _, _, err = readBody(cli.call("DELETE", "/networks/"+id, nil, nil))
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// CmdNetworkLs handles Network List UI
+func (cli *NetworkCli) CmdNetworkLs(chain string, args ...string) error {
+ cmd := cli.Subcmd(chain, "ls", "", "Lists all the networks created by the user", false)
+ quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
+ noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Do not truncate the output")
+ nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show the latest network created")
+ last := cmd.Int([]string{"n"}, -1, "Show n last created networks")
+ err := cmd.ParseFlags(args, true)
+ if err != nil {
+ return err
+ }
+ obj, _, err := readBody(cli.call("GET", "/networks", nil, nil))
+ if err != nil {
+ return err
+ }
+ if *last == -1 && *nLatest {
+ *last = 1
+ }
+
+ var networkResources []networkResource
+ err = json.Unmarshal(obj, &networkResources)
+ if err != nil {
+ return err
+ }
+
+ wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+
+ // unless quiet (-q) is specified, print field titles
+ if !*quiet {
+ fmt.Fprintln(wr, "NETWORK ID\tNAME\tTYPE")
+ }
+
+ for _, networkResource := range networkResources {
+ ID := networkResource.ID
+ netName := networkResource.Name
+ if !*noTrunc {
+ ID = stringid.TruncateID(ID)
+ }
+ if *quiet {
+ fmt.Fprintln(wr, ID)
+ continue
+ }
+ netType := networkResource.Type
+ fmt.Fprintf(wr, "%s\t%s\t%s\t",
+ ID,
+ netName,
+ netType)
+ fmt.Fprint(wr, "\n")
+ }
+ wr.Flush()
+ return nil
+}
+
+// CmdNetworkInfo handles Network Info UI
+func (cli *NetworkCli) CmdNetworkInfo(chain string, args ...string) error {
+ cmd := cli.Subcmd(chain, "info", "NETWORK", "Displays detailed information on a network", false)
+ cmd.Require(flag.Min, 1)
+ err := cmd.ParseFlags(args, true)
+ if err != nil {
+ return err
+ }
+
+ id, err := lookupNetworkID(cli, cmd.Arg(0))
+ if err != nil {
+ return err
+ }
+
+ obj, _, err := readBody(cli.call("GET", "/networks/"+id, nil, nil))
+ if err != nil {
+ return err
+ }
+ networkResource := &networkResource{}
+ if err := json.NewDecoder(bytes.NewReader(obj)).Decode(networkResource); err != nil {
+ return err
+ }
+ fmt.Fprintf(cli.out, "Network Id: %s\n", networkResource.ID)
+ fmt.Fprintf(cli.out, "Name: %s\n", networkResource.Name)
+ fmt.Fprintf(cli.out, "Type: %s\n", networkResource.Type)
+ if networkResource.Endpoints != nil {
+ for _, endpointResource := range networkResource.Endpoints {
+ fmt.Fprintf(cli.out, " Service Id: %s\n", endpointResource.ID)
+ fmt.Fprintf(cli.out, "\tName: %s\n", endpointResource.Name)
+ }
+ }
+
+ return nil
+}
+
+// Helper function to predict if a string is a name or id or partial-id
+// This provides a best-effort mechanism to identify a id with the help of GET Filter APIs
+// Being a UI, its most likely that name will be used by the user, which is used to lookup
+// the corresponding ID. If ID is not found, this function will assume that the passed string
+// is an ID by itself.
+
+func lookupNetworkID(cli *NetworkCli, nameID string) (string, error) {
+ obj, statusCode, err := readBody(cli.call("GET", "/networks?name="+nameID, nil, nil))
+ if err != nil {
+ return "", err
+ }
+
+ if statusCode != http.StatusOK {
+ return "", fmt.Errorf("name query failed for %s due to : statuscode(%d) %v", nameID, statusCode, string(obj))
+ }
+
+ var list []*networkResource
+ err = json.Unmarshal(obj, &list)
+ if err != nil {
+ return "", err
+ }
+ if len(list) > 0 {
+ // name query filter will always return a single-element collection
+ return list[0].ID, nil
+ }
+
+ // Check for Partial-id
+ obj, statusCode, err = readBody(cli.call("GET", "/networks?partial-id="+nameID, nil, nil))
+ if err != nil {
+ return "", err
+ }
+
+ if statusCode != http.StatusOK {
+ return "", fmt.Errorf("partial-id match query failed for %s due to : statuscode(%d) %v", nameID, statusCode, string(obj))
+ }
+
+ err = json.Unmarshal(obj, &list)
+ if err != nil {
+ return "", err
+ }
+ if len(list) == 0 {
+ return "", fmt.Errorf("resource not found %s", nameID)
+ }
+ if len(list) > 1 {
+ return "", fmt.Errorf("multiple Networks matching the partial identifier (%s). Please use full identifier", nameID)
+ }
+ return list[0].ID, nil
+}
+
+func networkUsage(chain string) string {
+ help := "Commands:\n"
+
+ for _, cmd := range networkCommands {
+ help += fmt.Sprintf(" %-25.25s%s\n", cmd.name, cmd.description)
+ }
+
+ for _, cmd := range serviceCommands {
+ help += fmt.Sprintf(" %-25.25s%s\n", "service "+cmd.name, cmd.description)
+ }
+
+ help += fmt.Sprintf("\nRun '%s network COMMAND --help' for more information on a command.", chain)
+ return help
+}
diff --git a/vendor/src/github.com/docker/libnetwork/client/service.go b/vendor/src/github.com/docker/libnetwork/client/service.go
new file mode 100644
index 0000000000..afdbb7f84b
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/client/service.go
@@ -0,0 +1,7 @@
+// +build !experimental
+
+package client
+
+var (
+ serviceCommands = []command{}
+)
diff --git a/vendor/src/github.com/docker/libnetwork/client/service_experimental.go b/vendor/src/github.com/docker/libnetwork/client/service_experimental.go
new file mode 100644
index 0000000000..02555fc197
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/client/service_experimental.go
@@ -0,0 +1,317 @@
+// +build experimental
+
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "text/tabwriter"
+
+ flag "github.com/docker/docker/pkg/mflag"
+ "github.com/docker/docker/pkg/stringid"
+)
+
+var (
+ serviceCommands = []command{
+ {"create", "Create a service endpoint"},
+ {"rm", "Remove a service endpoint"},
+ {"join", "Join a container to a service endpoint"},
+ {"leave", "Leave a container from a service endpoint"},
+ {"ls", "Lists all service endpoints on a network"},
+ {"info", "Display information of a service endpoint"},
+ }
+)
+
+func lookupServiceID(cli *NetworkCli, networkID string, nameID string) (string, error) {
+	obj, statusCode, err := readBody(cli.call("GET", fmt.Sprintf("/networks/%s/endpoints?name=%s", networkID, nameID), nil, nil))
+	if err != nil {
+		return "", err
+	}
+
+	if statusCode != http.StatusOK {
+		return "", fmt.Errorf("name query failed for %s due to : statuscode(%d) %v", nameID, statusCode, string(obj))
+	}
+
+	var list []*endpointResource // the endpoints query returns endpointResource objects, not networkResource
+	err = json.Unmarshal(obj, &list)
+	if err != nil {
+		return "", err
+	}
+	if len(list) > 0 {
+		// name query filter will always return a single-element collection
+		return list[0].ID, nil
+	}
+
+	// Check for Partial-id
+	obj, statusCode, err = readBody(cli.call("GET", fmt.Sprintf("/networks/%s/endpoints?partial-id=%s", networkID, nameID), nil, nil))
+	if err != nil {
+		return "", err
+	}
+
+	if statusCode != http.StatusOK {
+		return "", fmt.Errorf("partial-id match query failed for %s due to : statuscode(%d) %v", nameID, statusCode, string(obj))
+	}
+
+	err = json.Unmarshal(obj, &list)
+	if err != nil {
+		return "", err
+	}
+	if len(list) == 0 {
+		return "", fmt.Errorf("resource not found %s", nameID)
+	}
+	if len(list) > 1 {
+		return "", fmt.Errorf("multiple services matching the partial identifier (%s). Please use full identifier", nameID)
+	}
+	return list[0].ID, nil
+}
+
+func lookupContainerID(cli *NetworkCli, nameID string) (string, error) {
+ // TODO : containerID to sandbox-key ?
+ return nameID, nil
+}
+
+// CmdNetworkService handles the network service UI
+func (cli *NetworkCli) CmdNetworkService(chain string, args ...string) error {
+ cmd := cli.Subcmd(chain, "service", "COMMAND [OPTIONS] [arg...]", serviceUsage(chain), false)
+ cmd.Require(flag.Min, 1)
+ err := cmd.ParseFlags(args, true)
+ if err == nil {
+ cmd.Usage()
+ return fmt.Errorf("Invalid command : %v", args)
+ }
+ return err
+}
+
+// CmdNetworkServiceCreate handles service create UI
+func (cli *NetworkCli) CmdNetworkServiceCreate(chain string, args ...string) error {
+ cmd := cli.Subcmd(chain, "create", "SERVICE NETWORK", "Creates a new service on a network", false)
+ cmd.Require(flag.Min, 2)
+ err := cmd.ParseFlags(args, true)
+ if err != nil {
+ return err
+ }
+
+ networkID, err := lookupNetworkID(cli, cmd.Arg(1))
+ if err != nil {
+ return err
+ }
+
+ ec := endpointCreate{Name: cmd.Arg(0), NetworkID: networkID}
+
+ obj, _, err := readBody(cli.call("POST", "/networks/"+networkID+"/endpoints", ec, nil))
+ if err != nil {
+ return err
+ }
+
+ var replyID string
+ err = json.Unmarshal(obj, &replyID)
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(cli.out, "%s\n", replyID)
+ return nil
+}
+
+// CmdNetworkServiceRm handles service delete UI
+func (cli *NetworkCli) CmdNetworkServiceRm(chain string, args ...string) error {
+ cmd := cli.Subcmd(chain, "rm", "SERVICE NETWORK", "Deletes a service", false)
+ cmd.Require(flag.Min, 2)
+ err := cmd.ParseFlags(args, true)
+ if err != nil {
+ return err
+ }
+
+ networkID, err := lookupNetworkID(cli, cmd.Arg(1))
+ if err != nil {
+ return err
+ }
+
+ serviceID, err := lookupServiceID(cli, networkID, cmd.Arg(0))
+ if err != nil {
+ return err
+ }
+
+ _, _, err = readBody(cli.call("DELETE", "/networks/"+networkID+"/endpoints/"+serviceID, nil, nil))
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// CmdNetworkServiceLs handles service list UI
+func (cli *NetworkCli) CmdNetworkServiceLs(chain string, args ...string) error {
+ cmd := cli.Subcmd(chain, "ls", "NETWORK", "Lists all the services on a network", false)
+ quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
+ noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Do not truncate the output")
+ nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show the latest network created")
+ last := cmd.Int([]string{"n"}, -1, "Show n last created networks")
+ err := cmd.ParseFlags(args, true)
+ if err != nil {
+ return err
+ }
+
+ cmd.Require(flag.Min, 1)
+
+ networkID, err := lookupNetworkID(cli, cmd.Arg(0))
+ if err != nil {
+ return err
+ }
+
+ obj, _, err := readBody(cli.call("GET", "/networks/"+networkID+"/endpoints", nil, nil))
+ if err != nil {
+ fmt.Fprintf(cli.err, "%s", err.Error())
+ return err
+ }
+ if *last == -1 && *nLatest {
+ *last = 1
+ }
+
+ var endpointResources []endpointResource
+ err = json.Unmarshal(obj, &endpointResources)
+ if err != nil {
+ return err
+ }
+
+ wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+ // unless quiet (-q) is specified, print field titles
+ if !*quiet {
+ fmt.Fprintln(wr, "NETWORK SERVICE ID\tNAME\tNETWORK")
+ }
+
+ for _, networkResource := range endpointResources {
+ ID := networkResource.ID
+ netName := networkResource.Name
+ if !*noTrunc {
+ ID = stringid.TruncateID(ID)
+ }
+ if *quiet {
+ fmt.Fprintln(wr, ID)
+ continue
+ }
+ network := networkResource.Network
+ fmt.Fprintf(wr, "%s\t%s\t%s",
+ ID,
+ netName,
+ network)
+ fmt.Fprint(wr, "\n")
+ }
+ wr.Flush()
+
+ return nil
+}
+
+// CmdNetworkServiceInfo handles service info UI
+func (cli *NetworkCli) CmdNetworkServiceInfo(chain string, args ...string) error {
+ cmd := cli.Subcmd(chain, "info", "SERVICE NETWORK", "Displays detailed information on a service", false)
+ cmd.Require(flag.Min, 2)
+ err := cmd.ParseFlags(args, true)
+ if err != nil {
+ return err
+ }
+
+ networkID, err := lookupNetworkID(cli, cmd.Arg(1))
+ if err != nil {
+ return err
+ }
+
+ serviceID, err := lookupServiceID(cli, networkID, cmd.Arg(0))
+ if err != nil {
+ return err
+ }
+
+ obj, _, err := readBody(cli.call("GET", "/networks/"+networkID+"/endpoints/"+serviceID, nil, nil))
+ if err != nil {
+ fmt.Fprintf(cli.err, "%s", err.Error())
+ return err
+ }
+
+ endpointResource := &endpointResource{}
+ if err := json.NewDecoder(bytes.NewReader(obj)).Decode(endpointResource); err != nil {
+ return err
+ }
+ fmt.Fprintf(cli.out, "Service Id: %s\n", endpointResource.ID)
+ fmt.Fprintf(cli.out, "\tName: %s\n", endpointResource.Name)
+ fmt.Fprintf(cli.out, "\tNetwork: %s\n", endpointResource.Network)
+
+ return nil
+}
+
+// CmdNetworkServiceJoin handles service join UI
+func (cli *NetworkCli) CmdNetworkServiceJoin(chain string, args ...string) error {
+ cmd := cli.Subcmd(chain, "join", "CONTAINER SERVICE NETWORK", "Sets a container as a service backend", false)
+ cmd.Require(flag.Min, 3)
+ err := cmd.ParseFlags(args, true)
+ if err != nil {
+ return err
+ }
+
+ containerID, err := lookupContainerID(cli, cmd.Arg(0))
+ if err != nil {
+ return err
+ }
+
+ networkID, err := lookupNetworkID(cli, cmd.Arg(2))
+ if err != nil {
+ return err
+ }
+
+ serviceID, err := lookupServiceID(cli, networkID, cmd.Arg(1))
+ if err != nil {
+ return err
+ }
+
+ nc := endpointJoin{ContainerID: containerID}
+
+ _, _, err = readBody(cli.call("POST", "/networks/"+networkID+"/endpoints/"+serviceID+"/containers", nc, nil))
+ if err != nil {
+ fmt.Fprintf(cli.err, "%s", err.Error())
+ return err
+ }
+ return nil
+}
+
+// CmdNetworkServiceLeave handles service leave UI
+func (cli *NetworkCli) CmdNetworkServiceLeave(chain string, args ...string) error {
+ cmd := cli.Subcmd(chain, "leave", "CONTAINER SERVICE NETWORK", "Removes a container from service backend", false)
+ cmd.Require(flag.Min, 3)
+ err := cmd.ParseFlags(args, true)
+ if err != nil {
+ return err
+ }
+
+ containerID, err := lookupContainerID(cli, cmd.Arg(0))
+ if err != nil {
+ return err
+ }
+
+ networkID, err := lookupNetworkID(cli, cmd.Arg(2))
+ if err != nil {
+ return err
+ }
+
+ serviceID, err := lookupServiceID(cli, networkID, cmd.Arg(1))
+ if err != nil {
+ return err
+ }
+
+ _, _, err = readBody(cli.call("DELETE", "/networks/"+networkID+"/endpoints/"+serviceID+"/containers/"+containerID, nil, nil))
+ if err != nil {
+ fmt.Fprintf(cli.err, "%s", err.Error())
+ return err
+ }
+ return nil
+}
+
+// serviceUsage builds the help text listing the available service subcommands.
+func serviceUsage(chain string) string {
+	help := "Commands:\n"
+
+	for _, cmd := range serviceCommands {
+		help += fmt.Sprintf(" %-10.10s%s\n", cmd.name, cmd.description) // fix: was passing the whole struct (cmd) to %s instead of cmd.name
+	}
+
+	help += fmt.Sprintf("\nRun '%s service COMMAND --help' for more information on a command.", chain)
+	return help
+}
diff --git a/vendor/src/github.com/docker/libnetwork/client/types.go b/vendor/src/github.com/docker/libnetwork/client/types.go
new file mode 100644
index 0000000000..972ed435e8
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/client/types.go
@@ -0,0 +1,68 @@
+package client
+
+import "github.com/docker/libnetwork/types"
+
+/***********
+ Resources
+************/
+
+// networkResource is the body of the "get network" http response message
+type networkResource struct {
+ Name string
+ ID string
+ Type string
+ Endpoints []*endpointResource
+}
+
+// endpointResource is the body of the "get endpoint" http response message
+type endpointResource struct {
+ Name string
+ ID string
+ Network string
+}
+
+/***********
+ Body types
+ ************/
+
+// networkCreate is the expected body of the "create network" http request message
+type networkCreate struct {
+ Name string
+ NetworkType string
+ Options map[string]interface{}
+}
+
+// endpointCreate represents the body of the "create endpoint" http request message
+type endpointCreate struct {
+ Name string
+ NetworkID string
+ ExposedPorts []types.TransportPort
+ PortMapping []types.PortBinding
+}
+
+// endpointJoin represents the expected body of the "join endpoint" or "leave endpoint" http request messages
+type endpointJoin struct {
+ ContainerID string
+ HostName string
+ DomainName string
+ HostsPath string
+ ResolvConfPath string
+ DNS []string
+ ExtraHosts []endpointExtraHost
+ ParentUpdates []endpointParentUpdate
+ UseDefaultSandbox bool
+}
+
+// EndpointExtraHost represents the extra host object
+type endpointExtraHost struct {
+ Name string
+ Address string
+}
+
+// EndpointParentUpdate is the object carrying the information about the
+// endpoint parent that needs to be updated
+type endpointParentUpdate struct {
+ EndpointID string
+ Name string
+ Address string
+}
diff --git a/vendor/src/github.com/docker/libnetwork/cmd/dnet/dnet.go b/vendor/src/github.com/docker/libnetwork/cmd/dnet/dnet.go
new file mode 100644
index 0000000000..8c599249a1
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/cmd/dnet/dnet.go
@@ -0,0 +1,204 @@
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+
+ flag "github.com/docker/docker/pkg/mflag"
+ "github.com/docker/docker/pkg/parsers"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/term"
+ "github.com/docker/libnetwork"
+ "github.com/docker/libnetwork/api"
+ "github.com/docker/libnetwork/client"
+ "github.com/gorilla/mux"
+)
+
+var (
+ // DefaultHTTPHost is used if only port is provided to -H flag e.g. docker -d -H tcp://:8080
+ DefaultHTTPHost = "127.0.0.1"
+ // DefaultHTTPPort is the default http port used by dnet
+ DefaultHTTPPort = 2385
+ // DefaultUnixSocket exported
+ DefaultUnixSocket = "/var/run/dnet.sock"
+)
+
+func main() {
+ _, stdout, stderr := term.StdStreams()
+ logrus.SetOutput(stderr)
+
+ err := dnetCommand(stdout, stderr)
+ if err != nil {
+ os.Exit(1)
+ }
+}
+
+func dnetCommand(stdout, stderr io.Writer) error {
+ flag.Parse()
+
+ if *flHelp {
+ flag.Usage()
+ return nil
+ }
+
+ if *flLogLevel != "" {
+ lvl, err := logrus.ParseLevel(*flLogLevel)
+ if err != nil {
+ fmt.Fprintf(stderr, "Unable to parse logging level: %s\n", *flLogLevel)
+ return err
+ }
+ logrus.SetLevel(lvl)
+ } else {
+ logrus.SetLevel(logrus.InfoLevel)
+ }
+
+ if *flDebug {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+
+ if *flHost == "" {
+ defaultHost := os.Getenv("DNET_HOST")
+ if defaultHost == "" {
+ // TODO : Add UDS support
+ defaultHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
+ }
+ *flHost = defaultHost
+ }
+
+ dc, err := newDnetConnection(*flHost)
+ if err != nil {
+ if *flDaemon {
+ logrus.Error(err)
+ } else {
+ fmt.Fprint(stderr, err)
+ }
+ return err
+ }
+
+ if *flDaemon {
+ err := dc.dnetDaemon()
+ if err != nil {
+ logrus.Errorf("dnet Daemon exited with an error : %v", err)
+ }
+ return err
+ }
+
+ cli := client.NewNetworkCli(stdout, stderr, dc.httpCall)
+ if err := cli.Cmd("dnet", flag.Args()...); err != nil {
+ fmt.Fprintln(stderr, err)
+ return err
+ }
+ return nil
+}
+
+type dnetConnection struct {
+ // proto holds the client protocol i.e. unix.
+ proto string
+ // addr holds the client address.
+ addr string
+}
+
+func (d *dnetConnection) dnetDaemon() error {
+ controller, err := libnetwork.New()
+ if err != nil {
+ fmt.Println("Error starting dnetDaemon :", err)
+ return err
+ }
+ httpHandler := api.NewHTTPHandler(controller)
+ r := mux.NewRouter().StrictSlash(false)
+ post := r.PathPrefix("/{.*}/networks").Subrouter()
+ post.Methods("GET", "PUT", "POST", "DELETE").HandlerFunc(httpHandler)
+ return http.ListenAndServe(d.addr, r)
+}
+
+func newDnetConnection(val string) (*dnetConnection, error) {
+ url, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val)
+ if err != nil {
+ return nil, err
+ }
+ protoAddrParts := strings.SplitN(url, "://", 2)
+ if len(protoAddrParts) != 2 {
+ return nil, fmt.Errorf("bad format, expected tcp://ADDR")
+ }
+ if strings.ToLower(protoAddrParts[0]) != "tcp" {
+ return nil, fmt.Errorf("dnet currently only supports tcp transport")
+ }
+
+ return &dnetConnection{protoAddrParts[0], protoAddrParts[1]}, nil
+}
+
+func (d *dnetConnection) httpCall(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error) {
+ var in io.Reader
+ in, err := encodeData(data)
+ if err != nil {
+ return nil, -1, err
+ }
+
+ req, err := http.NewRequest(method, fmt.Sprintf("/dnet%s", path), in)
+ if err != nil {
+ return nil, -1, err
+ }
+
+ setupRequestHeaders(method, data, req, headers)
+
+ req.URL.Host = d.addr
+ req.URL.Scheme = "http"
+
+ httpClient := &http.Client{}
+ resp, err := httpClient.Do(req)
+ statusCode := -1
+ if resp != nil {
+ statusCode = resp.StatusCode
+ }
+ if err != nil {
+ return nil, statusCode, fmt.Errorf("error when trying to connect: %v", err)
+ }
+
+ if statusCode < 200 || statusCode >= 400 {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, statusCode, err
+ }
+ return nil, statusCode, fmt.Errorf("error : %s", bytes.TrimSpace(body))
+ }
+
+ return resp.Body, statusCode, nil
+}
+
+func setupRequestHeaders(method string, data interface{}, req *http.Request, headers map[string][]string) {
+ if data != nil {
+ if headers == nil {
+ headers = make(map[string][]string)
+ }
+ headers["Content-Type"] = []string{"application/json"}
+ }
+
+ expectedPayload := (method == "POST" || method == "PUT")
+
+ if expectedPayload && req.Header.Get("Content-Type") == "" {
+ req.Header.Set("Content-Type", "text/plain")
+ }
+
+ if headers != nil {
+ for k, v := range headers {
+ req.Header[k] = v
+ }
+ }
+}
+
+func encodeData(data interface{}) (*bytes.Buffer, error) {
+ params := bytes.NewBuffer(nil)
+ if data != nil {
+ if err := json.NewEncoder(params).Encode(data); err != nil {
+ return nil, err
+ }
+ }
+ return params, nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/cmd/dnet/dnet_test.go b/vendor/src/github.com/docker/libnetwork/cmd/dnet/dnet_test.go
new file mode 100644
index 0000000000..b8466f1a70
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/cmd/dnet/dnet_test.go
@@ -0,0 +1,132 @@
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/docker/libnetwork/netutils"
+)
+
+const dnetCommandName = "dnet"
+
+var origStdOut = os.Stdout
+
+func TestDnetDaemonCustom(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ t.Skip("This test must run inside a container ")
+ }
+ customPort := 4567
+ doneChan := make(chan bool)
+ go func() {
+ args := []string{dnetCommandName, "-d", fmt.Sprintf("-H=:%d", customPort)}
+ executeDnetCommand(t, args, true)
+ doneChan <- true
+ }()
+
+ select {
+ case <-doneChan:
+ t.Fatal("dnet Daemon is not supposed to exit")
+ case <-time.After(3 * time.Second):
+ args := []string{dnetCommandName, "-d=false", fmt.Sprintf("-H=:%d", customPort), "-D", "network", "ls"}
+ executeDnetCommand(t, args, true)
+ }
+}
+
+func TestDnetDaemonInvalidCustom(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ t.Skip("This test must run inside a container ")
+ }
+ customPort := 4668
+ doneChan := make(chan bool)
+ go func() {
+ args := []string{dnetCommandName, "-d=true", fmt.Sprintf("-H=:%d", customPort)}
+ executeDnetCommand(t, args, true)
+ doneChan <- true
+ }()
+
+ select {
+ case <-doneChan:
+ t.Fatal("dnet Daemon is not supposed to exit")
+ case <-time.After(3 * time.Second):
+ args := []string{dnetCommandName, "-d=false", "-H=:6669", "-D", "network", "ls"}
+ executeDnetCommand(t, args, false)
+ }
+}
+
+func TestDnetDaemonInvalidParams(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ t.Skip("This test must run inside a container ")
+ }
+ args := []string{dnetCommandName, "-d=false", "-H=tcp:/127.0.0.1:8080"}
+ executeDnetCommand(t, args, false)
+
+ args = []string{dnetCommandName, "-d=false", "-H=unix://var/run/dnet.sock"}
+ executeDnetCommand(t, args, false)
+
+ args = []string{dnetCommandName, "-d=false", "-H=", "-l=invalid"}
+ executeDnetCommand(t, args, false)
+
+ args = []string{dnetCommandName, "-d=false", "-H=", "-l=error", "invalid"}
+ executeDnetCommand(t, args, false)
+}
+
+func TestDnetDefaultsWithFlags(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ t.Skip("This test must run inside a container ")
+ }
+ doneChan := make(chan bool)
+ go func() {
+ args := []string{dnetCommandName, "-d=true", "-H=", "-l=error"}
+ executeDnetCommand(t, args, true)
+ doneChan <- true
+ }()
+
+ select {
+ case <-doneChan:
+ t.Fatal("dnet Daemon is not supposed to exit")
+ case <-time.After(3 * time.Second):
+ args := []string{dnetCommandName, "-d=false", "network", "create", "-d=null", "test"}
+ executeDnetCommand(t, args, true)
+
+ args = []string{dnetCommandName, "-d=false", "-D", "network", "ls"}
+ executeDnetCommand(t, args, true)
+ }
+}
+
+func TestDnetMain(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ t.Skip("This test must run inside a container ")
+ }
+ customPort := 4568
+ doneChan := make(chan bool)
+ go func() {
+ args := []string{dnetCommandName, "-d=true", "-h=false", fmt.Sprintf("-H=:%d", customPort)}
+ os.Args = args
+ main()
+ doneChan <- true
+ }()
+ select {
+ case <-doneChan:
+ t.Fatal("dnet Daemon is not supposed to exit")
+ case <-time.After(2 * time.Second):
+ }
+}
+
+func executeDnetCommand(t *testing.T, args []string, shouldSucced bool) {
+ _, w, _ := os.Pipe()
+ os.Stdout = w
+
+ os.Args = args
+ err := dnetCommand(ioutil.Discard, ioutil.Discard)
+ if shouldSucced && err != nil {
+ os.Stdout = origStdOut
+ t.Fatalf("cli [%v] must succeed, but failed with an error : %v", args, err)
+ } else if !shouldSucced && err == nil {
+ os.Stdout = origStdOut
+ t.Fatalf("cli [%v] must fail, but succeeded with an error : %v", args, err)
+ }
+ os.Stdout = origStdOut
+}
diff --git a/vendor/src/github.com/docker/libnetwork/cmd/dnet/flags.go b/vendor/src/github.com/docker/libnetwork/cmd/dnet/flags.go
new file mode 100644
index 0000000000..2e77e1873b
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/cmd/dnet/flags.go
@@ -0,0 +1,49 @@
+package main
+
+import (
+ "fmt"
+ "os"
+
+ flag "github.com/docker/docker/pkg/mflag"
+)
+
+type command struct {
+ name string
+ description string
+}
+
+type byName []command
+
+var (
+ flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode")
+ flHost = flag.String([]string{"H", "-host"}, "", "Daemon socket to connect to")
+ flLogLevel = flag.String([]string{"l", "-log-level"}, "info", "Set the logging level")
+ flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode")
+ flHelp = flag.Bool([]string{"h", "-help"}, false, "Print usage")
+
+ dnetCommands = []command{
+ {"network", "Network management commands"},
+ }
+)
+
+func init() {
+ flag.Usage = func() {
+ fmt.Fprint(os.Stdout, "Usage: dnet [OPTIONS] COMMAND [arg...]\n\nA self-sufficient runtime for container networking.\n\nOptions:\n")
+
+ flag.CommandLine.SetOutput(os.Stdout)
+ flag.PrintDefaults()
+
+ help := "\nCommands:\n"
+
+ for _, cmd := range dnetCommands {
+ help += fmt.Sprintf(" %-10.10s%s\n", cmd.name, cmd.description)
+ }
+
+ help += "\nRun 'dnet COMMAND --help' for more information on a command."
+ fmt.Fprintf(os.Stdout, "%s\n", help)
+ }
+}
+
+func printUsage() {
+ fmt.Println("Usage: dnet network ")
+}
diff --git a/vendor/src/github.com/docker/libnetwork/cmd/readme_test/readme.go b/vendor/src/github.com/docker/libnetwork/cmd/readme_test/readme.go
new file mode 100644
index 0000000000..a15fda0c34
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/cmd/readme_test/readme.go
@@ -0,0 +1,66 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/docker/libnetwork"
+ "github.com/docker/libnetwork/netlabel"
+ "github.com/docker/libnetwork/options"
+ "github.com/docker/libnetwork/types"
+)
+
+func main() {
+ // Create a new controller instance
+ controller, err := libnetwork.New()
+ if err != nil {
+ return
+ }
+
+ // Select and configure the network driver
+ networkType := "bridge"
+
+ driverOptions := options.Generic{}
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = driverOptions
+ err = controller.ConfigureNetworkDriver(networkType, genericOption)
+ if err != nil {
+ return
+ }
+
+ // Create a network for containers to join.
+ // NewNetwork accepts Variadic optional arguments that libnetwork and Drivers can make use of
+ network, err := controller.NewNetwork(networkType, "network1")
+ if err != nil {
+ return
+ }
+
+ // For each new container: allocate IP and interfaces. The returned network
+ // settings will be used for container infos (inspect and such), as well as
+ // iptables rules for port publishing. This info is contained or accessible
+ // from the returned endpoint.
+ ep, err := network.CreateEndpoint("Endpoint1")
+ if err != nil {
+ return
+ }
+
+ // A container can join the endpoint by providing the container ID to the join
+ // api which returns the sandbox key which can be used to access the sandbox
+ // created for the container during join.
+ // Join accepts Variadic arguments which will be made use of by libnetwork and Drivers
+ _, err = ep.Join("container1",
+ libnetwork.JoinOptionHostname("test"),
+ libnetwork.JoinOptionDomainname("docker.io"))
+ if err != nil {
+ return
+ }
+
+ // libnetwork client can check the endpoint's operational data via the Info() API
+ epInfo, err := ep.DriverInfo()
+ mapData, ok := epInfo[netlabel.PortMap]
+ if ok {
+ portMapping, ok := mapData.([]types.PortBinding)
+ if ok {
+ fmt.Printf("Current port mapping for endpoint %s: %v", ep.Name(), portMapping)
+ }
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/cmd/test/main.go b/vendor/src/github.com/docker/libnetwork/cmd/test/main.go
new file mode 100644
index 0000000000..d944654bff
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/cmd/test/main.go
@@ -0,0 +1,28 @@
+package main
+
+import (
+ "fmt"
+ "log"
+ "net"
+
+ "github.com/docker/libnetwork"
+ "github.com/docker/libnetwork/options"
+)
+
+func main() {
+ ip, net, _ := net.ParseCIDR("192.168.100.1/24")
+ net.IP = ip
+
+ options := options.Generic{"AddressIPv4": net}
+ controller, err := libnetwork.New()
+ if err != nil {
+ log.Fatal(err)
+ }
+ netType := "bridge"
+ err = controller.ConfigureNetworkDriver(netType, options)
+ netw, err := controller.NewNetwork(netType, "dummy")
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("Network=%#v\n", netw)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/controller.go b/vendor/src/github.com/docker/libnetwork/controller.go
new file mode 100644
index 0000000000..442473eb20
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/controller.go
@@ -0,0 +1,301 @@
+/*
+Package libnetwork provides the basic functionality and extension points to
+create network namespaces and allocate interfaces for containers to use.
+
+ // Create a new controller instance
+ controller, _err := libnetwork.New()
+
+ // Select and configure the network driver
+ networkType := "bridge"
+
+ driverOptions := options.Generic{}
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = driverOptions
+ err := controller.ConfigureNetworkDriver(networkType, genericOption)
+ if err != nil {
+ return
+ }
+
+ // Create a network for containers to join.
+ // NewNetwork accepts Variadic optional arguments that libnetwork and Drivers can make use of
+ network, err := controller.NewNetwork(networkType, "network1")
+ if err != nil {
+ return
+ }
+
+ // For each new container: allocate IP and interfaces. The returned network
+ // settings will be used for container infos (inspect and such), as well as
+ // iptables rules for port publishing. This info is contained or accessible
+ // from the returned endpoint.
+ ep, err := network.CreateEndpoint("Endpoint1")
+ if err != nil {
+ return
+ }
+
+ // A container can join the endpoint by providing the container ID to the join
+ // api which returns the sandbox key which can be used to access the sandbox
+ // created for the container during join.
+ // Join accepts Variadic arguments which will be made use of by libnetwork and Drivers
+ _, err = ep.Join("container1",
+ libnetwork.JoinOptionHostname("test"),
+ libnetwork.JoinOptionDomainname("docker.io"))
+ if err != nil {
+ return
+ }
+*/
+package libnetwork
+
+import (
+ "sync"
+
+ "github.com/docker/docker/pkg/plugins"
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/sandbox"
+ "github.com/docker/libnetwork/types"
+)
+
+// NetworkController provides the interface for controller instance which manages
+// networks.
+type NetworkController interface {
+ // ConfigureNetworkDriver applies the passed options to the driver instance for the specified network type
+ ConfigureNetworkDriver(networkType string, options map[string]interface{}) error
+
+ // Create a new network. The options parameter carries network specific options.
+ // Labels support will be added in the near future.
+ NewNetwork(networkType, name string, options ...NetworkOption) (Network, error)
+
+ // Networks returns the list of Network(s) managed by this controller.
+ Networks() []Network
+
+ // WalkNetworks uses the provided function to walk the Network(s) managed by this controller.
+ WalkNetworks(walker NetworkWalker)
+
+ // NetworkByName returns the Network which has the passed name. If not found, the error ErrNoSuchNetwork is returned.
+ NetworkByName(name string) (Network, error)
+
+ // NetworkByID returns the Network which has the passed id. If not found, the error ErrNoSuchNetwork is returned.
+ NetworkByID(id string) (Network, error)
+}
+
+// NetworkWalker is a client provided function which will be used to walk the Networks.
+// When the function returns true, the walk will stop.
+type NetworkWalker func(nw Network) bool
+
+type sandboxData struct {
+ sandbox sandbox.Sandbox
+ refCnt int
+}
+
+type networkTable map[types.UUID]*network
+type endpointTable map[types.UUID]*endpoint
+type sandboxTable map[string]*sandboxData
+
+type controller struct {
+ networks networkTable
+ drivers driverTable
+ sandboxes sandboxTable
+ sync.Mutex
+}
+
+// New creates a new instance of network controller.
+func New() (NetworkController, error) {
+ c := &controller{
+ networks: networkTable{},
+ sandboxes: sandboxTable{},
+ drivers: driverTable{}}
+ if err := initDrivers(c); err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+func (c *controller) ConfigureNetworkDriver(networkType string, options map[string]interface{}) error {
+ c.Lock()
+ d, ok := c.drivers[networkType]
+ c.Unlock()
+ if !ok {
+ return NetworkTypeError(networkType)
+ }
+ return d.Config(options)
+}
+
+func (c *controller) RegisterDriver(networkType string, driver driverapi.Driver) error {
+ c.Lock()
+ defer c.Unlock()
+ if _, ok := c.drivers[networkType]; ok {
+ return driverapi.ErrActiveRegistration(networkType)
+ }
+ c.drivers[networkType] = driver
+ return nil
+}
+
+// NewNetwork creates a new network of the specified network type. The options
+// are network specific and modeled in a generic way.
+func (c *controller) NewNetwork(networkType, name string, options ...NetworkOption) (Network, error) {
+ if name == "" {
+ return nil, ErrInvalidName(name)
+ }
+ // Check if a driver for the specified network type is available
+ c.Lock()
+ d, ok := c.drivers[networkType]
+ c.Unlock()
+ if !ok {
+ var err error
+ d, err = c.loadDriver(networkType)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Check if a network already exists with the specified network name
+ c.Lock()
+ for _, n := range c.networks {
+ if n.name == name {
+ c.Unlock()
+ return nil, NetworkNameError(name)
+ }
+ }
+ c.Unlock()
+
+ // Construct the network object
+ network := &network{
+ name: name,
+ id: types.UUID(stringid.GenerateRandomID()),
+ ctrlr: c,
+ driver: d,
+ endpoints: endpointTable{},
+ }
+
+ network.processOptions(options...)
+ // Create the network
+ if err := d.CreateNetwork(network.id, network.generic); err != nil {
+ return nil, err
+ }
+
+ // Store the network handler in controller
+ c.Lock()
+ c.networks[network.id] = network
+ c.Unlock()
+
+ return network, nil
+}
+
+func (c *controller) Networks() []Network {
+ c.Lock()
+ defer c.Unlock()
+
+ list := make([]Network, 0, len(c.networks))
+ for _, n := range c.networks {
+ list = append(list, n)
+ }
+
+ return list
+}
+
+func (c *controller) WalkNetworks(walker NetworkWalker) {
+ for _, n := range c.Networks() {
+ if walker(n) {
+ return
+ }
+ }
+}
+
+func (c *controller) NetworkByName(name string) (Network, error) {
+ if name == "" {
+ return nil, ErrInvalidName(name)
+ }
+ var n Network
+
+ s := func(current Network) bool {
+ if current.Name() == name {
+ n = current
+ return true
+ }
+ return false
+ }
+
+ c.WalkNetworks(s)
+
+ if n == nil {
+ return nil, ErrNoSuchNetwork(name)
+ }
+
+ return n, nil
+}
+
+func (c *controller) NetworkByID(id string) (Network, error) {
+ if id == "" {
+ return nil, ErrInvalidID(id)
+ }
+ c.Lock()
+ defer c.Unlock()
+ if n, ok := c.networks[types.UUID(id)]; ok {
+ return n, nil
+ }
+ return nil, ErrNoSuchNetwork(id)
+}
+
+func (c *controller) sandboxAdd(key string, create bool) (sandbox.Sandbox, error) {
+ c.Lock()
+ defer c.Unlock()
+
+ sData, ok := c.sandboxes[key]
+ if !ok {
+ sb, err := sandbox.NewSandbox(key, create)
+ if err != nil {
+ return nil, err
+ }
+
+ sData = &sandboxData{sandbox: sb, refCnt: 1}
+ c.sandboxes[key] = sData
+ return sData.sandbox, nil
+ }
+
+ sData.refCnt++
+ return sData.sandbox, nil
+}
+
+func (c *controller) sandboxRm(key string) {
+ c.Lock()
+ defer c.Unlock()
+
+ sData := c.sandboxes[key]
+ sData.refCnt--
+
+ if sData.refCnt == 0 {
+ sData.sandbox.Destroy()
+ delete(c.sandboxes, key)
+ }
+}
+
+func (c *controller) sandboxGet(key string) sandbox.Sandbox {
+ c.Lock()
+ defer c.Unlock()
+
+ sData, ok := c.sandboxes[key]
+ if !ok {
+ return nil
+ }
+
+ return sData.sandbox
+}
+
+func (c *controller) loadDriver(networkType string) (driverapi.Driver, error) {
+ // Plugins pkg performs lazy loading of plugins that acts as remote drivers.
+ // As per the design, this Get call will result in remote driver discovery if there is a corresponding plugin available.
+ _, err := plugins.Get(networkType, driverapi.NetworkPluginEndpointType)
+ if err != nil {
+ if err == plugins.ErrNotFound {
+ return nil, types.NotFoundErrorf(err.Error())
+ }
+ return nil, err
+ }
+ c.Lock()
+ defer c.Unlock()
+ d, ok := c.drivers[networkType]
+ if !ok {
+ return nil, ErrInvalidNetworkDriver(networkType)
+ }
+ return d, nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/docs/bridge.md b/vendor/src/github.com/docker/libnetwork/docs/bridge.md
new file mode 100644
index 0000000000..4633ce8c50
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/docs/bridge.md
@@ -0,0 +1,13 @@
+Bridge Driver
+=============
+
+The bridge driver is an implementation that uses Linux Bridging and iptables to provide connectivity for containers.
+It creates a single bridge, called `docker0` by default, and attaches a `veth pair` between the bridge and every endpoint.
+
+## Configuration
+
+The bridge driver supports configuration through the Docker Daemon flags.
+
+## Usage
+
+This driver is supported for the default "bridge" network only and it cannot be used for any other networks.
diff --git a/vendor/src/github.com/docker/libnetwork/docs/design.md b/vendor/src/github.com/docker/libnetwork/docs/design.md
new file mode 100644
index 0000000000..b3112da0fc
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/docs/design.md
@@ -0,0 +1,148 @@
+Design
+======
+
+The vision and goals of libnetwork are highlighted in [roadmap](../ROADMAP.md).
+This document describes how libnetwork has been designed in order to achieve this.
+Requirements for individual releases can be found on the [Project Page](https://github.com/docker/libnetwork/wiki)
+
+Many of the design decisions are inspired by the learnings from the Docker networking design as of Docker v1.6.
+Please refer to this [Docker v1.6 Design](legacy.md) document for more information on networking design as of Docker v1.6.
+
+## Goal
+
+libnetwork project will follow Docker and Linux philosophy of developing small, highly modular and composable tools that work well independently.
+Libnetwork aims to satisfy that composable need for Networking in Containers.
+
+## The Container Network Model
+
+Libnetwork implements Container Network Model (CNM) which formalizes the steps required to provide networking for containers while providing an abstraction that can be used to support multiple network drivers. The CNM is built on 3 main components.
+
+**Sandbox**
+
+A Sandbox contains the configuration of a container's network stack.
+This includes management of the container's interfaces, routing table and DNS settings.
+An implementation of a Sandbox could be a Linux Network Namespace, a FreeBSD Jail or other similar concept.
+A Sandbox may contain *many* endpoints from *multiple* networks
+
+**Endpoint**
+
+An Endpoint joins a Sandbox to a Network.
+An implementation of an Endpoint could be a `veth` pair, an Open vSwitch internal port or similar.
+An Endpoint can belong to *only one* network and may only belong to *one* Sandbox.
+
+**Network**
+
+A Network is a group of Endpoints that are able to communicate with each-other directly.
+An implementation of a Network could be a Linux bridge, a VLAN etc...
+Networks consist of *many* endpoints
+
+## CNM Objects
+
+**NetworkController**
+`NetworkController` object provides the entry-point into libnetwork that exposes simple APIs for the users (such as Docker Engine) to allocate and manage Networks. libnetwork supports multiple active drivers (both inbuilt and remote). `NetworkController` allows user to bind a particular driver to a given network.
+
+**Driver**
+`Driver` is not a user-visible object, but drivers provide the actual implementation that makes networks work. `NetworkController` however provides an API to configure any specific driver with driver-specific options/labels that is transparent to libnetwork, but can be handled by the drivers directly. Drivers can be both inbuilt (such as Bridge, Host, None & overlay) and remote (from plugin providers) to satisfy various use cases & deployment scenarios. At this point, the Driver owns a network and is responsible for managing the network (including IPAM, etc.). This can be improved in the future by having multiple drivers participating in handling various network management functionalities.
+
+**Network**
+`Network` object is an implementation of the `CNM : Network` as defined above. `NetworkController` provides APIs to create and manage `Network` object. Whenever a `Network` is created or updated, the corresponding `Driver` will be notified of the event. LibNetwork treats `Network` object at an abstract level to provide connectivity between a group of end-points that belong to the same network and isolate from the rest. The Driver performs the actual work of providing the required connectivity and isolation. The connectivity can be within the same host or across multiple-hosts. Hence `Network` has a global scope within a cluster.
+
+**Endpoint**
+`Endpoint` represents a Service Endpoint. It provides the connectivity for services exposed by a container in a network with other services provided by other containers in the network. `Network` object provides APIs to create and manage endpoint. An endpoint can be attached to only one network. `Endpoint` creation calls are made to the corresponding `Driver` which is responsible for allocating resources for the corresponding `Sandbox`. Since Endpoint represents a Service and not necessarily a particular container, `Endpoint` has a global scope within a cluster as well.
+
+**Sandbox**
+`Sandbox` object represents container's network configuration such as ip-address, mac-address, routes, DNS entries. A `Sandbox` object is created when the user requests to create an endpoint on a network. The `Driver` that handles the `Network` is responsible to allocate the required network resources (such as ip-address) and pass the info called `SandboxInfo` back to libnetwork. libnetwork will make use of OS specific constructs (example: netns for Linux) to populate the network configuration into the containers that is represented by the `Sandbox`. A `Sandbox` can have multiple endpoints attached to different networks. Since `Sandbox` is associated with a particular container in a given host, it has a local scope that represents the Host that the Container belong to.
+
+**CNM Attributes**
+
+***Options***
+`Options` provides a generic and flexible mechanism to pass `Driver` specific configuration option from the user to the `Driver` directly. `Options` are just key-value pairs of data with `key` represented by a string and `value` represented by a generic object (such as golang `interface{}`). Libnetwork will operate on the `Options` ONLY if the `key` matches any of the well-known `Label` defined in the `net-labels` package. `Options` also encompasses `Labels` as explained below. `Options` are generally NOT end-user visible (in UI), while `Labels` are.
+
+***Labels***
+`Labels` are very similar to `Options` & in fact they are just a subset of `Options`. `Labels` are typically end-user visible and are represented in the UI explicitly using the `--labels` option. They are passed from the UI to the `Driver` so that `Driver` can make use of it and perform any `Driver` specific operation (such as a subnet to allocate IP-Addresses from in a Network).
+
+## CNM Lifecycle
+
+Consumers of the CNM, like Docker for example, interact through the CNM Objects and its APIs to network the containers that they manage.
+
+0. `Drivers` register with `NetworkController`. Built-in drivers register inside of LibNetwork, while remote drivers register with LibNetwork via the Plugin mechanism. (*plugin-mechanism is WIP*). Each `driver` handles a particular `networkType`.
+
+1. `NetworkController` object is created using `libnetwork.New()` API to manage the allocation of Networks and optionally configure a `Driver` with driver specific `Options`.
+
+2. `Network` is created using the controller's `NewNetwork()` API by providing a `name` and `networkType`. `networkType` parameter helps to choose a corresponding `Driver` and binds the created `Network` to that `Driver`. From this point, any operation on `Network` will be handled by that `Driver`.
+
+3. `controller.NewNetwork()` API also takes in optional `options` parameter which carries Driver-specific options and `Labels`, which the Drivers can make use for its purpose.
+
+4. `network.CreateEndpoint()` can be called to create a new Endpoint in a given network. This API also accepts optional `options` parameter which drivers can make use of. These 'options' carry both well-known labels and driver-specific labels. Drivers will in turn be called with `driver.CreateEndpoint` and it can choose to reserve IPv4/IPv6 addresses when an `Endpoint` is created in a `Network`. The `Driver` will assign these addresses using `InterfaceInfo` interface defined in the `driverapi`. The IP/IPv6 are needed to complete the endpoint as service definition along with the ports the endpoint exposes since essentially a service endpoint is nothing but a network address and the port number that the application container is listening on.
+
+5. `endpoint.Join()` can be used to attach a container to an `Endpoint`. The Join operation will create a `Sandbox` if it doesn't exist already for that container. The Drivers can make use of the Sandbox Key to identify multiple endpoints attached to the same container. This API also accepts optional `options` parameter which drivers can make use of.
+ * Though it is not a direct design issue of LibNetwork, it is highly encouraged to have users like `Docker` to call the endpoint.Join() during Container's `Start()` lifecycle that is invoked *before* the container is made operational. As part of Docker integration, this will be taken care of.
+ * one of a FAQ on endpoint join() API is that, why do we need an API to create an Endpoint and another to join the endpoint.
+ - The answer is based on the fact that Endpoint represents a Service which may or may not be backed by a Container. When an Endpoint is created, it will have its resources reserved so that any container can get attached to the endpoint later and get a consistent networking behaviour.
+
+6. `endpoint.Leave()` can be invoked when a container is stopped. The `Driver` can cleanup the states that it allocated during the `Join()` call. LibNetwork will delete the `Sandbox` when the last referencing endpoint leaves the network. But LibNetwork keeps hold of the IP addresses as long as the endpoint is still present and will be reused when the container(or any container) joins again. This ensures that the container's resources are reused when they are Stopped and Started again.
+
+7. `endpoint.Delete()` is used to delete an endpoint from a network. This results in deleting an endpoint and cleaning up the cached `sandbox.Info`.
+
+8. `network.Delete()` is used to delete a network. LibNetwork will not allow the delete to proceed if there are any existing endpoints attached to the Network.
+
+
+## Implementation Details
+
+### Networks & Endpoints
+
+LibNetwork's Network and Endpoint APIs are primarily for managing the corresponding Objects and book-keeping them to provide a level of abstraction as required by the CNM. It delegates the actual implementation to the drivers which realize the functionality as promised in the CNM. For more information on these details, please see [the drivers section](#Drivers).
+
+### Sandbox
+
+Libnetwork provides a framework to implement a Sandbox in multiple Operating Systems. Currently we have implemented Sandbox for Linux using `namespace_linux.go` and `configure_linux.go` in the `sandbox` package.
+This creates a Network Namespace for each sandbox which is uniquely identified by a path on the host filesystem.
+Netlink calls are used to move interfaces from the global namespace to the Sandbox namespace.
+Netlink is also used to manage the routing table in the namespace.
+
+## Drivers
+
+## API
+
+Drivers are essentially an extension of libnetwork and provide the actual implementation for all of the LibNetwork APIs defined above. Hence there is a 1-1 correspondence for all the `Network` and `Endpoint` APIs, which includes:
+* `driver.Config`
+* `driver.CreateNetwork`
+* `driver.DeleteNetwork`
+* `driver.CreateEndpoint`
+* `driver.DeleteEndpoint`
+* `driver.Join`
+* `driver.Leave`
+
+These Driver-facing APIs make use of unique identifiers (`networkid`,`endpointid`,...) instead of names (as seen in user-facing APIs).
+
+The APIs are still work in progress and there can be changes to these based on the driver requirements especially when it comes to Multi-host networking.
+
+## Implementations
+
+Libnetwork includes the following driver packages:
+
+- null
+- bridge
+- overlay
+- remote
+
+### Null
+
+The null driver is a `noop` implementation of the driver API, used only in cases where no networking is desired. This is to provide backward compatibility to the Docker's `--net=none` option.
+
+### Bridge
+
+The `bridge` driver provides a Linux-specific bridging implementation based on the Linux Bridge.
+For more details, please [see the Bridge Driver documentation](bridge.md)
+
+### Overlay
+
+The `overlay` driver implements networking that can span multiple hosts using overlay network encapsulations such as VXLAN.
+For more details on its design, please see the [Overlay Driver Design](overlay.md)
+
+### Remote
+
+The `remote` package does not provide a driver, but provides a means of supporting drivers over a remote transport.
+This allows a driver to be written in a language of your choice.
+For further details, please see the [Remote Driver Design](remote.md)
+
diff --git a/vendor/src/github.com/docker/libnetwork/docs/legacy.md b/vendor/src/github.com/docker/libnetwork/docs/legacy.md
new file mode 100644
index 0000000000..7a19dcdff9
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/docs/legacy.md
@@ -0,0 +1,15 @@
+
+This document provides a TL;DR version of https://docs.docker.com/v1.6/articles/networking/.
+If you are interested in a more detailed operational design, please refer to this link.
+
+## Docker Networking design as of Docker v1.6
+
+Prior to libnetwork, Docker Networking was handled in both Docker Engine and libcontainer.
+Docker Engine makes use of the Bridge Driver to provide single-host networking solution with the help of linux bridge and IPTables.
+Docker Engine provides simple configurations such as `--link`, `--expose`,... to enable container connectivity within the same host by abstracting away networking configuration completely from the Containers.
+For external connectivity, it relied upon NAT & Port-mapping
+
+Docker Engine was responsible for providing the configuration for the container's networking stack.
+
+Libcontainer would then use this information to create the necessary networking devices and move them in to a network namespace.
+This namespace would then be used when the container is started.
diff --git a/vendor/src/github.com/docker/libnetwork/docs/overlay.md b/vendor/src/github.com/docker/libnetwork/docs/overlay.md
new file mode 100644
index 0000000000..ec48618c22
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/docs/overlay.md
@@ -0,0 +1,6 @@
+Overlay Driver
+==============
+
+## Configuration
+
+## Usage
diff --git a/vendor/src/github.com/docker/libnetwork/docs/remote.md b/vendor/src/github.com/docker/libnetwork/docs/remote.md
new file mode 100644
index 0000000000..c34a1cd0b0
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/docs/remote.md
@@ -0,0 +1,18 @@
+Remote Drivers
+==============
+
+The remote driver package provides the integration point for dynamically-registered drivers.
+
+## LibNetwork Integration
+
+When LibNetwork initialises the `Remote` package with the `Init()` function, it passes a `DriverCallback` as a parameter, which implements the `RegisterDriver()`. The Remote Driver package can use this interface to register any of the `Dynamic` Drivers/Plugins with LibNetwork's `NetworkController`.
+
+This design ensures that the implementation details (TBD) of Dynamic Driver Registration mechanism is completely owned by the inbuilt-Remote driver, and it doesn't expose any of the driver layer to the North of LibNetwork (none of the LibNetwork client APIs are impacted).
+
+## Implementation
+
+The actual implementation of how the Inbuilt Remote Driver registers with the Dynamic Driver is Work-In-Progress. But, the Design Goal is to Honor the bigger goals of LibNetwork by keeping it Highly modular and make sure that LibNetwork is fully composable in nature.
+
+## Usage
+
+The In-Built Remote Driver follows all the rules of any other In-Built Driver and has exactly the same Driver APIs exposed. LibNetwork will also support driver-specific `options` and User-supplied `Labels` which the Dynamic Drivers can make use for its operations.
diff --git a/vendor/src/github.com/docker/libnetwork/driverapi/driverapi.go b/vendor/src/github.com/docker/libnetwork/driverapi/driverapi.go
new file mode 100644
index 0000000000..9fb41ff7b6
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/driverapi/driverapi.go
@@ -0,0 +1,118 @@
+package driverapi
+
+import (
+ "net"
+
+ "github.com/docker/libnetwork/types"
+)
+
+// NetworkPluginEndpointType represents the Endpoint Type used by Plugin system
+const NetworkPluginEndpointType = "NetworkDriver"
+
+// Driver is an interface that every plugin driver needs to implement.
+type Driver interface {
+ // Push driver specific config to the driver
+ Config(options map[string]interface{}) error
+
+ // CreateNetwork invokes the driver method to create a network passing
+ // the network id and network specific config. The config mechanism will
+ // eventually be replaced with labels which are yet to be introduced.
+ CreateNetwork(nid types.UUID, options map[string]interface{}) error
+
+ // DeleteNetwork invokes the driver method to delete network passing
+ // the network id.
+ DeleteNetwork(nid types.UUID) error
+
+ // CreateEndpoint invokes the driver method to create an endpoint
+ // passing the network id, endpoint id endpoint information and driver
+ // specific config. The endpoint information can be either consumed by
+ // the driver or populated by the driver. The config mechanism will
+ // eventually be replaced with labels which are yet to be introduced.
+ CreateEndpoint(nid, eid types.UUID, epInfo EndpointInfo, options map[string]interface{}) error
+
+ // DeleteEndpoint invokes the driver method to delete an endpoint
+ // passing the network id and endpoint id.
+ DeleteEndpoint(nid, eid types.UUID) error
+
+ // EndpointOperInfo retrieves from the driver the operational data related to the specified endpoint
+ EndpointOperInfo(nid, eid types.UUID) (map[string]interface{}, error)
+
+ // Join method is invoked when a Sandbox is attached to an endpoint.
+ Join(nid, eid types.UUID, sboxKey string, jinfo JoinInfo, options map[string]interface{}) error
+
+ // Leave method is invoked when a Sandbox detaches from an endpoint.
+ Leave(nid, eid types.UUID) error
+
+	// Type returns the type of this driver, the network type this driver manages
+ Type() string
+}
+
+// EndpointInfo provides a go interface to fetch or populate endpoint assigned network resources.
+type EndpointInfo interface {
+ // Interfaces returns a list of interfaces bound to the endpoint.
+ // If the list is not empty the driver is only expected to consume the interfaces.
+ // It is an error to try to add interfaces to a non-empty list.
+ // If the list is empty the driver is expected to populate with 0 or more interfaces.
+ Interfaces() []InterfaceInfo
+
+ // AddInterface is used by the driver to add an interface to the interface list.
+ // This method will return an error if the driver attempts to add interfaces
+ // if the Interfaces() method returned a non-empty list.
+ // ID field need only have significance within the endpoint so it can be a simple
+ // monotonically increasing number
+ AddInterface(ID int, mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error
+}
+
+// InterfaceInfo provides a go interface for drivers to retrieve
+// network information to interface resources.
+type InterfaceInfo interface {
+ // MacAddress returns the MAC address.
+ MacAddress() net.HardwareAddr
+
+ // Address returns the IPv4 address.
+ Address() net.IPNet
+
+ // AddressIPv6 returns the IPv6 address.
+ AddressIPv6() net.IPNet
+
+ // ID returns the numerical id of the interface and has significance only within
+ // the endpoint.
+ ID() int
+}
+
+// InterfaceNameInfo provides a go interface for the drivers to assign names
+// to interfaces.
+type InterfaceNameInfo interface {
+ // SetNames method assigns the srcName and dstPrefix for the interface.
+ SetNames(srcName, dstPrefix string) error
+
+ // ID returns the numerical id that was assigned to the interface by the driver
+ // CreateEndpoint.
+ ID() int
+}
+
+// JoinInfo represents a set of resources that the driver has the ability to provide during
+// join time.
+type JoinInfo interface {
+ // InterfaceNames returns a list of InterfaceNameInfo go interface to facilitate
+ // setting the names for the interfaces.
+ InterfaceNames() []InterfaceNameInfo
+
+ // SetGateway sets the default IPv4 gateway when a container joins the endpoint.
+ SetGateway(net.IP) error
+
+ // SetGatewayIPv6 sets the default IPv6 gateway when a container joins the endpoint.
+ SetGatewayIPv6(net.IP) error
+
+ // SetHostsPath sets the overriding /etc/hosts path to use for the container.
+ SetHostsPath(string) error
+
+ // SetResolvConfPath sets the overriding /etc/resolv.conf path to use for the container.
+ SetResolvConfPath(string) error
+}
+
+// DriverCallback provides a Callback interface for Drivers into LibNetwork
+type DriverCallback interface {
+ // RegisterDriver provides a way for Remote drivers to dynamically register new NetworkType and associate with a driver instance
+ RegisterDriver(name string, driver Driver) error
+}
diff --git a/vendor/src/github.com/docker/libnetwork/driverapi/errors.go b/vendor/src/github.com/docker/libnetwork/driverapi/errors.go
new file mode 100644
index 0000000000..041ef41506
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/driverapi/errors.go
@@ -0,0 +1,56 @@
+package driverapi
+
+import (
+ "fmt"
+)
+
+// ErrNoNetwork is returned if no network with the specified id exists
+type ErrNoNetwork string
+
+func (enn ErrNoNetwork) Error() string {
+ return fmt.Sprintf("No network (%s) exists", string(enn))
+}
+
+// NotFound denotes the type of this error
+func (enn ErrNoNetwork) NotFound() {}
+
+// ErrEndpointExists is returned if more than one endpoint is added to the network
+type ErrEndpointExists string
+
+func (ee ErrEndpointExists) Error() string {
+ return fmt.Sprintf("Endpoint (%s) already exists (Only one endpoint allowed)", string(ee))
+}
+
+// Forbidden denotes the type of this error
+func (ee ErrEndpointExists) Forbidden() {}
+
+// ErrNotImplemented is returned when a Driver has not implemented an API yet
+type ErrNotImplemented struct{}
+
+func (eni *ErrNotImplemented) Error() string {
+ return "The API is not implemented yet"
+}
+
+// NotImplemented denotes the type of this error
+func (eni *ErrNotImplemented) NotImplemented() {}
+
+// ErrNoEndpoint is returned if no endpoint with the specified id exists
+type ErrNoEndpoint string
+
+func (ene ErrNoEndpoint) Error() string {
+ return fmt.Sprintf("No endpoint (%s) exists", string(ene))
+}
+
+// NotFound denotes the type of this error
+func (ene ErrNoEndpoint) NotFound() {}
+
+// ErrActiveRegistration represents an error when a driver is registered to a networkType that is previously registered
+type ErrActiveRegistration string
+
+// Error interface for ErrActiveRegistration
+func (ar ErrActiveRegistration) Error() string {
+ return fmt.Sprintf("Driver already registered for type %q", string(ar))
+}
+
+// Forbidden denotes the type of this error
+func (ar ErrActiveRegistration) Forbidden() {}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers.go b/vendor/src/github.com/docker/libnetwork/drivers.go
new file mode 100644
index 0000000000..130f7ab343
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers.go
@@ -0,0 +1,25 @@
+package libnetwork
+
+import (
+ "github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/drivers/bridge"
+ "github.com/docker/libnetwork/drivers/host"
+ "github.com/docker/libnetwork/drivers/null"
+ "github.com/docker/libnetwork/drivers/remote"
+)
+
+type driverTable map[string]driverapi.Driver
+
+func initDrivers(dc driverapi.DriverCallback) error {
+ for _, fn := range [](func(driverapi.DriverCallback) error){
+ bridge.Init,
+ host.Init,
+ null.Init,
+ remote.Init,
+ } {
+ if err := fn(dc); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
new file mode 100644
index 0000000000..b1cfe74492
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
@@ -0,0 +1,928 @@
+package bridge
+
+import (
+ "errors"
+ "net"
+ "strings"
+ "sync"
+
+ "github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/ipallocator"
+ "github.com/docker/libnetwork/netlabel"
+ "github.com/docker/libnetwork/netutils"
+ "github.com/docker/libnetwork/options"
+ "github.com/docker/libnetwork/portmapper"
+ "github.com/docker/libnetwork/sandbox"
+ "github.com/docker/libnetwork/types"
+ "github.com/vishvananda/netlink"
+)
+
+const (
+ networkType = "bridge"
+ vethPrefix = "veth"
+ vethLen = 7
+ containerVethPrefix = "eth"
+ maxAllocatePortAttempts = 10
+ ifaceID = 1
+)
+
+var (
+ ipAllocator *ipallocator.IPAllocator
+ portMapper *portmapper.PortMapper
+)
+
+// Configuration info for the "bridge" driver.
+type Configuration struct {
+ EnableIPForwarding bool
+}
+
+// NetworkConfiguration for network specific configuration
+type NetworkConfiguration struct {
+ BridgeName string
+ AddressIPv4 *net.IPNet
+ FixedCIDR *net.IPNet
+ FixedCIDRv6 *net.IPNet
+ EnableIPv6 bool
+ EnableIPTables bool
+ EnableIPMasquerade bool
+ EnableICC bool
+ Mtu int
+ DefaultGatewayIPv4 net.IP
+ DefaultGatewayIPv6 net.IP
+ DefaultBindingIP net.IP
+ AllowNonDefaultBridge bool
+ EnableUserlandProxy bool
+}
+
+// EndpointConfiguration represents the user specified configuration for the sandbox endpoint
+type EndpointConfiguration struct {
+ MacAddress net.HardwareAddr
+ PortBindings []types.PortBinding
+ ExposedPorts []types.TransportPort
+}
+
+// ContainerConfiguration represents the user specified configuration for a container
+type ContainerConfiguration struct {
+ ParentEndpoints []string
+ ChildEndpoints []string
+}
+
+type bridgeEndpoint struct {
+ id types.UUID
+ intf *sandbox.Interface
+ macAddress net.HardwareAddr
+ config *EndpointConfiguration // User specified parameters
+ containerConfig *ContainerConfiguration
+ portMapping []types.PortBinding // Operation port bindings
+}
+
+type bridgeNetwork struct {
+ id types.UUID
+ bridge *bridgeInterface // The bridge's L3 interface
+ config *NetworkConfiguration
+ endpoints map[types.UUID]*bridgeEndpoint // key: endpoint id
+ sync.Mutex
+}
+
+type driver struct {
+ config *Configuration
+ network *bridgeNetwork
+ sync.Mutex
+}
+
+func init() {
+ ipAllocator = ipallocator.New()
+ portMapper = portmapper.New()
+}
+
+// New constructs a new bridge driver
+func newDriver() driverapi.Driver {
+ return &driver{}
+}
+
+// Init registers a new instance of bridge driver
+func Init(dc driverapi.DriverCallback) error {
+ return dc.RegisterDriver(networkType, newDriver())
+}
+
+// Validate performs a static validation on the network configuration parameters.
+// Whatever can be assessed a priori before attempting any programming.
+func (c *NetworkConfiguration) Validate() error {
+ if c.Mtu < 0 {
+ return ErrInvalidMtu(c.Mtu)
+ }
+
+ // If bridge v4 subnet is specified
+ if c.AddressIPv4 != nil {
+ // If Container restricted subnet is specified, it must be a subset of bridge subnet
+ if c.FixedCIDR != nil {
+ // Check Network address
+ if !c.AddressIPv4.Contains(c.FixedCIDR.IP) {
+ return &ErrInvalidContainerSubnet{}
+ }
+ // Check it is effectively a subset
+ brNetLen, _ := c.AddressIPv4.Mask.Size()
+ cnNetLen, _ := c.FixedCIDR.Mask.Size()
+ if brNetLen > cnNetLen {
+ return &ErrInvalidContainerSubnet{}
+ }
+ }
+ // If default gw is specified, it must be part of bridge subnet
+ if c.DefaultGatewayIPv4 != nil {
+ if !c.AddressIPv4.Contains(c.DefaultGatewayIPv4) {
+ return &ErrInvalidGateway{}
+ }
+ }
+ }
+
+ // If default v6 gw is specified, FixedCIDRv6 must be specified and gw must belong to FixedCIDRv6 subnet
+ if c.EnableIPv6 && c.DefaultGatewayIPv6 != nil {
+ if c.FixedCIDRv6 == nil || !c.FixedCIDRv6.Contains(c.DefaultGatewayIPv6) {
+ return &ErrInvalidGateway{}
+ }
+ }
+
+ return nil
+}
+
+func (n *bridgeNetwork) getEndpoint(eid types.UUID) (*bridgeEndpoint, error) {
+ n.Lock()
+ defer n.Unlock()
+
+ if eid == "" {
+ return nil, InvalidEndpointIDError(eid)
+ }
+
+ if ep, ok := n.endpoints[eid]; ok {
+ return ep, nil
+ }
+
+ return nil, nil
+}
+
+func (d *driver) Config(option map[string]interface{}) error {
+ var config *Configuration
+
+ d.Lock()
+ defer d.Unlock()
+
+ if d.config != nil {
+ return &ErrConfigExists{}
+ }
+
+ genericData, ok := option[netlabel.GenericData]
+ if ok && genericData != nil {
+ switch opt := genericData.(type) {
+ case options.Generic:
+ opaqueConfig, err := options.GenerateFromModel(opt, &Configuration{})
+ if err != nil {
+ return err
+ }
+ config = opaqueConfig.(*Configuration)
+ case *Configuration:
+ config = opt
+ default:
+ return &ErrInvalidDriverConfig{}
+ }
+
+ d.config = config
+ } else {
+ config = &Configuration{}
+ }
+
+ if config.EnableIPForwarding {
+ return setupIPForwarding(config)
+ }
+
+ return nil
+}
+
+func (d *driver) getNetwork(id types.UUID) (*bridgeNetwork, error) {
+ // Just a dummy function to return the only network managed by Bridge driver.
+ // But this API makes the caller code unchanged when we move to support multiple networks.
+ d.Lock()
+ defer d.Unlock()
+ return d.network, nil
+}
+
+func parseNetworkOptions(option options.Generic) (*NetworkConfiguration, error) {
+ var config *NetworkConfiguration
+
+ genericData, ok := option[netlabel.GenericData]
+ if ok && genericData != nil {
+ switch opt := genericData.(type) {
+ case options.Generic:
+ opaqueConfig, err := options.GenerateFromModel(opt, &NetworkConfiguration{})
+ if err != nil {
+ return nil, err
+ }
+ config = opaqueConfig.(*NetworkConfiguration)
+ case *NetworkConfiguration:
+ config = opt
+ default:
+ return nil, &ErrInvalidNetworkConfig{}
+ }
+
+ if err := config.Validate(); err != nil {
+ return nil, err
+ }
+ } else {
+ config = &NetworkConfiguration{}
+ }
+
+ if _, ok := option[netlabel.EnableIPv6]; ok {
+ config.EnableIPv6 = option[netlabel.EnableIPv6].(bool)
+ }
+
+ return config, nil
+}
+
+// Create a new network using bridge plugin
+func (d *driver) CreateNetwork(id types.UUID, option map[string]interface{}) error {
+ var err error
+
+ // Driver must be configured
+ d.Lock()
+
+ // Sanity checks
+ if d.network != nil {
+ d.Unlock()
+ return &ErrNetworkExists{}
+ }
+
+ // Create and set network handler in driver
+ d.network = &bridgeNetwork{id: id, endpoints: make(map[types.UUID]*bridgeEndpoint)}
+ network := d.network
+ d.Unlock()
+
+ // On failure make sure to reset driver network handler to nil
+ defer func() {
+ if err != nil {
+ d.Lock()
+ d.network = nil
+ d.Unlock()
+ }
+ }()
+
+ config, err := parseNetworkOptions(option)
+ if err != nil {
+ return err
+ }
+ network.config = config
+
+ // Create or retrieve the bridge L3 interface
+ bridgeIface := newInterface(config)
+ network.bridge = bridgeIface
+
+ // Prepare the bridge setup configuration
+ bridgeSetup := newBridgeSetup(config, bridgeIface)
+
+ // If the bridge interface doesn't exist, we need to start the setup steps
+ // by creating a new device and assigning it an IPv4 address.
+ bridgeAlreadyExists := bridgeIface.exists()
+ if !bridgeAlreadyExists {
+ bridgeSetup.queueStep(setupDevice)
+ }
+
+ // Even if a bridge exists try to setup IPv4.
+ bridgeSetup.queueStep(setupBridgeIPv4)
+
+ // Conditionally queue setup steps depending on configuration values.
+ for _, step := range []struct {
+ Condition bool
+ Fn setupStep
+ }{
+ // Enable IPv6 on the bridge if required. We do this even for a
+ // previously existing bridge, as it may be here from a previous
+ // installation where IPv6 wasn't supported yet and needs to be
+ // assigned an IPv6 link-local address.
+ {config.EnableIPv6, setupBridgeIPv6},
+
+ // We ensure that the bridge has the expectedIPv4 and IPv6 addresses in
+ // the case of a previously existing device.
+ {bridgeAlreadyExists, setupVerifyAndReconcile},
+
+ // Setup the bridge to allocate containers IPv4 addresses in the
+ // specified subnet.
+ {config.FixedCIDR != nil, setupFixedCIDRv4},
+
+ // Setup the bridge to allocate containers global IPv6 addresses in the
+ // specified subnet.
+ {config.FixedCIDRv6 != nil, setupFixedCIDRv6},
+
+ // Setup Loopback Adresses Routing
+ {!config.EnableUserlandProxy, setupLoopbackAdressesRouting},
+
+ // Setup IPTables.
+ {config.EnableIPTables, setupIPTables},
+
+ // Setup DefaultGatewayIPv4
+ {config.DefaultGatewayIPv4 != nil, setupGatewayIPv4},
+
+ // Setup DefaultGatewayIPv6
+ {config.DefaultGatewayIPv6 != nil, setupGatewayIPv6},
+ } {
+ if step.Condition {
+ bridgeSetup.queueStep(step.Fn)
+ }
+ }
+
+ // Block bridge IP from being allocated.
+ bridgeSetup.queueStep(allocateBridgeIP)
+ // Apply the prepared list of steps, and abort at the first error.
+ bridgeSetup.queueStep(setupDeviceUp)
+ if err = bridgeSetup.apply(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
// DeleteNetwork deletes the single network instance handled by this driver,
// removing its bridge device from the host. It fails if any endpoint is
// still attached to the network.
func (d *driver) DeleteNetwork(nid types.UUID) error {
	var err error

	// Get network handler and remove it from driver
	d.Lock()
	n := d.network
	d.network = nil
	d.Unlock()

	// On failure set network handler back in driver, but
	// only if is not already taken over by some other thread
	defer func() {
		if err != nil {
			d.Lock()
			if d.network == nil {
				d.network = n
			}
			d.Unlock()
		}
	}()

	// Sanity check
	if n == nil {
		err = driverapi.ErrNoNetwork(nid)
		return err
	}

	// Cannot remove network if endpoints are still present.
	// NOTE(review): n.endpoints is read without holding n's lock — racy if
	// endpoints can be mutated concurrently; confirm callers serialize.
	if len(n.endpoints) != 0 {
		err = ActiveEndpointsError(n.id)
		return err
	}

	// Programming. Errors must flow through `err` (never a fresh variable)
	// so the deferred restore above can observe them.
	err = netlink.LinkDel(n.bridge.Link)

	return err
}
+
+func (d *driver) CreateEndpoint(nid, eid types.UUID, epInfo driverapi.EndpointInfo, epOptions map[string]interface{}) error {
+ var (
+ ipv6Addr *net.IPNet
+ err error
+ )
+
+ if epInfo == nil {
+ return errors.New("invalid endpoint info passed")
+ }
+
+ if len(epInfo.Interfaces()) != 0 {
+ return errors.New("non empty interface list passed to bridge(local) driver")
+ }
+
+ // Get the network handler and make sure it exists
+ d.Lock()
+ n := d.network
+ config := n.config
+ d.Unlock()
+ if n == nil {
+ return driverapi.ErrNoNetwork(nid)
+ }
+
+ // Sanity check
+ n.Lock()
+ if n.id != nid {
+ n.Unlock()
+ return InvalidNetworkIDError(nid)
+ }
+ n.Unlock()
+
+ // Check if endpoint id is good and retrieve correspondent endpoint
+ ep, err := n.getEndpoint(eid)
+ if err != nil {
+ return err
+ }
+
+ // Endpoint with that id exists either on desired or other sandbox
+ if ep != nil {
+ return driverapi.ErrEndpointExists(eid)
+ }
+
+ // Try to convert the options to endpoint configuration
+ epConfig, err := parseEndpointOptions(epOptions)
+ if err != nil {
+ return err
+ }
+
+ // Create and add the endpoint
+ n.Lock()
+ endpoint := &bridgeEndpoint{id: eid, config: epConfig}
+ n.endpoints[eid] = endpoint
+ n.Unlock()
+
+ // On failure make sure to remove the endpoint
+ defer func() {
+ if err != nil {
+ n.Lock()
+ delete(n.endpoints, eid)
+ n.Unlock()
+ }
+ }()
+
+ // Generate a name for what will be the host side pipe interface
+ name1, err := generateIfaceName()
+ if err != nil {
+ return err
+ }
+
+ // Generate a name for what will be the sandbox side pipe interface
+ name2, err := generateIfaceName()
+ if err != nil {
+ return err
+ }
+
+ // Generate and add the interface pipe host <-> sandbox
+ veth := &netlink.Veth{
+ LinkAttrs: netlink.LinkAttrs{Name: name1, TxQLen: 0},
+ PeerName: name2}
+ if err = netlink.LinkAdd(veth); err != nil {
+ return err
+ }
+
+ // Get the host side pipe interface handler
+ host, err := netlink.LinkByName(name1)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ netlink.LinkDel(host)
+ }
+ }()
+
+ // Get the sandbox side pipe interface handler
+ sbox, err := netlink.LinkByName(name2)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ netlink.LinkDel(sbox)
+ }
+ }()
+
+ // Set the sbox's MAC. If specified, use the one configured by user, otherwise use a random one
+ mac := electMacAddress(epConfig)
+ err = netlink.LinkSetHardwareAddr(sbox, mac)
+ if err != nil {
+ return err
+ }
+ endpoint.macAddress = mac
+
+ // Add bridge inherited attributes to pipe interfaces
+ if config.Mtu != 0 {
+ err = netlink.LinkSetMTU(host, config.Mtu)
+ if err != nil {
+ return err
+ }
+ err = netlink.LinkSetMTU(sbox, config.Mtu)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Attach host side pipe interface into the bridge
+ if err = netlink.LinkSetMaster(host,
+ &netlink.Bridge{LinkAttrs: netlink.LinkAttrs{Name: config.BridgeName}}); err != nil {
+ return err
+ }
+
+ // v4 address for the sandbox side pipe interface
+ ip4, err := ipAllocator.RequestIP(n.bridge.bridgeIPv4, nil)
+ if err != nil {
+ return err
+ }
+ ipv4Addr := &net.IPNet{IP: ip4, Mask: n.bridge.bridgeIPv4.Mask}
+
+ // v6 address for the sandbox side pipe interface
+ ipv6Addr = &net.IPNet{}
+ if config.EnableIPv6 {
+ var ip6 net.IP
+
+ network := n.bridge.bridgeIPv6
+ if config.FixedCIDRv6 != nil {
+ network = config.FixedCIDRv6
+ }
+
+ ones, _ := network.Mask.Size()
+ if ones <= 80 {
+ ip6 = make(net.IP, len(network.IP))
+ copy(ip6, network.IP)
+ for i, h := range mac {
+ ip6[i+10] = h
+ }
+ }
+
+ ip6, err := ipAllocator.RequestIP(network, ip6)
+ if err != nil {
+ return err
+ }
+
+ ipv6Addr = &net.IPNet{IP: ip6, Mask: network.Mask}
+ }
+
+ // Create the sandbox side pipe interface
+ intf := &sandbox.Interface{}
+ intf.SrcName = name2
+ intf.DstName = containerVethPrefix
+ intf.Address = ipv4Addr
+
+ if config.EnableIPv6 {
+ intf.AddressIPv6 = ipv6Addr
+ }
+
+ // Store the interface in endpoint, this is needed for cleanup on DeleteEndpoint()
+ endpoint.intf = intf
+
+ err = epInfo.AddInterface(ifaceID, endpoint.macAddress, *ipv4Addr, *ipv6Addr)
+ if err != nil {
+ return err
+ }
+
+ // Program any required port mapping and store them in the endpoint
+ endpoint.portMapping, err = allocatePorts(epConfig, intf, config.DefaultBindingIP, config.EnableUserlandProxy)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (d *driver) DeleteEndpoint(nid, eid types.UUID) error {
+ var err error
+
+ // Get the network handler and make sure it exists
+ d.Lock()
+ n := d.network
+ config := n.config
+ d.Unlock()
+ if n == nil {
+ return driverapi.ErrNoNetwork(nid)
+ }
+
+ // Sanity Check
+ n.Lock()
+ if n.id != nid {
+ n.Unlock()
+ return InvalidNetworkIDError(nid)
+ }
+ n.Unlock()
+
+ // Check endpoint id and if an endpoint is actually there
+ ep, err := n.getEndpoint(eid)
+ if err != nil {
+ return err
+ }
+ if ep == nil {
+ return EndpointNotFoundError(eid)
+ }
+
+ // Remove it
+ n.Lock()
+ delete(n.endpoints, eid)
+ n.Unlock()
+
+ // On failure make sure to set back ep in n.endpoints, but only
+ // if it hasn't been taken over already by some other thread.
+ defer func() {
+ if err != nil {
+ n.Lock()
+ if _, ok := n.endpoints[eid]; !ok {
+ n.endpoints[eid] = ep
+ }
+ n.Unlock()
+ }
+ }()
+
+ // Remove port mappings. Do not stop endpoint delete on unmap failure
+ releasePorts(ep)
+
+ // Release the v4 address allocated to this endpoint's sandbox interface
+ err = ipAllocator.ReleaseIP(n.bridge.bridgeIPv4, ep.intf.Address.IP)
+ if err != nil {
+ return err
+ }
+
+ // Release the v6 address allocated to this endpoint's sandbox interface
+ if config.EnableIPv6 {
+ err := ipAllocator.ReleaseIP(n.bridge.bridgeIPv6, ep.intf.AddressIPv6.IP)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Try removal of link. Discard error: link pair might have
+ // already been deleted by sandbox delete.
+ link, err := netlink.LinkByName(ep.intf.SrcName)
+ if err == nil {
+ netlink.LinkDel(link)
+ }
+
+ return nil
+}
+
+func (d *driver) EndpointOperInfo(nid, eid types.UUID) (map[string]interface{}, error) {
+ // Get the network handler and make sure it exists
+ d.Lock()
+ n := d.network
+ d.Unlock()
+ if n == nil {
+ return nil, driverapi.ErrNoNetwork(nid)
+ }
+
+ // Sanity check
+ n.Lock()
+ if n.id != nid {
+ n.Unlock()
+ return nil, InvalidNetworkIDError(nid)
+ }
+ n.Unlock()
+
+ // Check if endpoint id is good and retrieve correspondent endpoint
+ ep, err := n.getEndpoint(eid)
+ if err != nil {
+ return nil, err
+ }
+ if ep == nil {
+ return nil, driverapi.ErrNoEndpoint(eid)
+ }
+
+ m := make(map[string]interface{})
+
+ if ep.portMapping != nil {
+ // Return a copy of the operational data
+ pmc := make([]types.PortBinding, 0, len(ep.portMapping))
+ for _, pm := range ep.portMapping {
+ pmc = append(pmc, pm.GetCopy())
+ }
+ m[netlabel.PortMap] = pmc
+ }
+
+ if len(ep.macAddress) != 0 {
+ m[netlabel.MacAddress] = ep.macAddress
+ }
+
+ return m, nil
+}
+
+// Join method is invoked when a Sandbox is attached to an endpoint.
+func (d *driver) Join(nid, eid types.UUID, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
+ network, err := d.getNetwork(nid)
+ if err != nil {
+ return err
+ }
+
+ endpoint, err := network.getEndpoint(eid)
+ if err != nil {
+ return err
+ }
+
+ if endpoint == nil {
+ return EndpointNotFoundError(eid)
+ }
+
+ for _, iNames := range jinfo.InterfaceNames() {
+ // Make sure to set names on the correct interface ID.
+ if iNames.ID() == ifaceID {
+ err = iNames.SetNames(endpoint.intf.SrcName, endpoint.intf.DstName)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ err = jinfo.SetGateway(network.bridge.gatewayIPv4)
+ if err != nil {
+ return err
+ }
+
+ err = jinfo.SetGatewayIPv6(network.bridge.gatewayIPv6)
+ if err != nil {
+ return err
+ }
+
+ if !network.config.EnableICC {
+ return d.link(network, endpoint, options, true)
+ }
+
+ return nil
+}
+
+// Leave method is invoked when a Sandbox detaches from an endpoint.
+func (d *driver) Leave(nid, eid types.UUID) error {
+ network, err := d.getNetwork(nid)
+ if err != nil {
+ return err
+ }
+
+ endpoint, err := network.getEndpoint(eid)
+ if err != nil {
+ return err
+ }
+
+ if endpoint == nil {
+ return EndpointNotFoundError(eid)
+ }
+
+ if !network.config.EnableICC {
+ return d.link(network, endpoint, nil, false)
+ }
+
+ return nil
+}
+
// link establishes (enable=true) or tears down (enable=false) the links
// described by the container configuration: for each parent endpoint, a link
// from the parent toward this endpoint's exposed ports, and for each child
// endpoint, a link from this endpoint toward the child's exposed ports.
// NOTE(review): newLink is defined elsewhere in this package; presumably it
// programs iptables rules — confirm against its implementation.
func (d *driver) link(network *bridgeNetwork, endpoint *bridgeEndpoint, options map[string]interface{}, enable bool) error {
	var (
		cc *ContainerConfiguration
		err error
	)

	// On enable, the configuration comes from the join options; on disable,
	// reuse the configuration recorded when the links were enabled.
	if enable {
		cc, err = parseContainerOptions(options)
		if err != nil {
			return err
		}
	} else {
		cc = endpoint.containerConfig
	}

	if cc == nil {
		return nil
	}

	// Parent links only make sense if this endpoint exposes ports.
	if endpoint.config != nil && endpoint.config.ExposedPorts != nil {
		for _, p := range cc.ParentEndpoints {
			var parentEndpoint *bridgeEndpoint
			parentEndpoint, err = network.getEndpoint(types.UUID(p))
			if err != nil {
				return err
			}
			if parentEndpoint == nil {
				err = InvalidEndpointIDError(p)
				return err
			}

			l := newLink(parentEndpoint.intf.Address.IP.String(),
				endpoint.intf.Address.IP.String(),
				endpoint.config.ExposedPorts, network.config.BridgeName)
			if enable {
				err = l.Enable()
				if err != nil {
					return err
				}
				// Deliberate defer-in-loop: every enabled link registers a
				// rollback that fires at function exit if a later step fails
				// (the deferred closures observe the shared err variable).
				defer func() {
					if err != nil {
						l.Disable()
					}
				}()
			} else {
				l.Disable()
			}
		}
	}

	for _, c := range cc.ChildEndpoints {
		var childEndpoint *bridgeEndpoint
		childEndpoint, err = network.getEndpoint(types.UUID(c))
		if err != nil {
			return err
		}
		if childEndpoint == nil {
			err = InvalidEndpointIDError(c)
			return err
		}
		// Skip children that expose no ports: there is nothing to link to.
		if childEndpoint.config == nil || childEndpoint.config.ExposedPorts == nil {
			continue
		}

		l := newLink(endpoint.intf.Address.IP.String(),
			childEndpoint.intf.Address.IP.String(),
			childEndpoint.config.ExposedPorts, network.config.BridgeName)
		if enable {
			err = l.Enable()
			if err != nil {
				return err
			}
			// Same rollback-at-exit pattern as the parent loop above.
			defer func() {
				if err != nil {
					l.Disable()
				}
			}()
		} else {
			l.Disable()
		}
	}

	// Remember the configuration so a later Leave (enable=false) can undo it.
	if enable {
		endpoint.containerConfig = cc
	}

	return nil
}
+
// Type returns the network type string this driver registers under.
func (d *driver) Type() string {
	return networkType
}
+
+func parseEndpointOptions(epOptions map[string]interface{}) (*EndpointConfiguration, error) {
+ if epOptions == nil {
+ return nil, nil
+ }
+
+ ec := &EndpointConfiguration{}
+
+ if opt, ok := epOptions[netlabel.MacAddress]; ok {
+ if mac, ok := opt.(net.HardwareAddr); ok {
+ ec.MacAddress = mac
+ } else {
+ return nil, &ErrInvalidEndpointConfig{}
+ }
+ }
+
+ if opt, ok := epOptions[netlabel.PortMap]; ok {
+ if bs, ok := opt.([]types.PortBinding); ok {
+ ec.PortBindings = bs
+ } else {
+ return nil, &ErrInvalidEndpointConfig{}
+ }
+ }
+
+ if opt, ok := epOptions[netlabel.ExposedPorts]; ok {
+ if ports, ok := opt.([]types.TransportPort); ok {
+ ec.ExposedPorts = ports
+ } else {
+ return nil, &ErrInvalidEndpointConfig{}
+ }
+ }
+
+ return ec, nil
+}
+
+func parseContainerOptions(cOptions map[string]interface{}) (*ContainerConfiguration, error) {
+ if cOptions == nil {
+ return nil, nil
+ }
+ genericData := cOptions[netlabel.GenericData]
+ if genericData == nil {
+ return nil, nil
+ }
+ switch opt := genericData.(type) {
+ case options.Generic:
+ opaqueConfig, err := options.GenerateFromModel(opt, &ContainerConfiguration{})
+ if err != nil {
+ return nil, err
+ }
+ return opaqueConfig.(*ContainerConfiguration), nil
+ case *ContainerConfiguration:
+ return opt, nil
+ default:
+ return nil, nil
+ }
+}
+
+func electMacAddress(epConfig *EndpointConfiguration) net.HardwareAddr {
+ if epConfig != nil && epConfig.MacAddress != nil {
+ return epConfig.MacAddress
+ }
+ return netutils.GenerateRandomMAC()
+}
+
+// Generates a name to be used for a virtual ethernet
+// interface. The name is constructed by 'veth' appended
+// by a randomly generated hex value. (example: veth0f60e2c)
+func generateIfaceName() (string, error) {
+ for i := 0; i < 3; i++ {
+ name, err := netutils.GenerateRandomName(vethPrefix, vethLen)
+ if err != nil {
+ continue
+ }
+ if _, err := net.InterfaceByName(name); err != nil {
+ if strings.Contains(err.Error(), "no such") {
+ return name, nil
+ }
+ return "", err
+ }
+ }
+ return "", &ErrIfaceName{}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge_test.go
new file mode 100644
index 0000000000..f8967555ec
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge_test.go
@@ -0,0 +1,532 @@
+package bridge
+
+import (
+ "bytes"
+ "fmt"
+ "net"
+ "regexp"
+ "testing"
+
+ "github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/iptables"
+ "github.com/docker/libnetwork/netlabel"
+ "github.com/docker/libnetwork/netutils"
+ "github.com/docker/libnetwork/types"
+ "github.com/vishvananda/netlink"
+)
+
+func TestCreateFullOptions(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+ d := newDriver()
+
+ config := &Configuration{
+ EnableIPForwarding: true,
+ }
+
+ netConfig := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName,
+ EnableIPv6: true,
+ FixedCIDR: bridgeNetworks[0],
+ EnableIPTables: true,
+ }
+ _, netConfig.FixedCIDRv6, _ = net.ParseCIDR("2001:db8::/48")
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = config
+
+ if err := d.Config(genericOption); err != nil {
+ t.Fatalf("Failed to setup driver config: %v", err)
+ }
+
+ netOption := make(map[string]interface{})
+ netOption[netlabel.GenericData] = netConfig
+
+ err := d.CreateNetwork("dummy", netOption)
+ if err != nil {
+ t.Fatalf("Failed to create bridge: %v", err)
+ }
+}
+
+func TestCreate(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+ d := newDriver()
+
+ config := &NetworkConfiguration{BridgeName: DefaultBridgeName}
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = config
+
+ if err := d.CreateNetwork("dummy", genericOption); err != nil {
+ t.Fatalf("Failed to create bridge: %v", err)
+ }
+}
+
+func TestCreateFail(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+ d := newDriver()
+
+ config := &NetworkConfiguration{BridgeName: "dummy0"}
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = config
+
+ if err := d.CreateNetwork("dummy", genericOption); err == nil {
+ t.Fatal("Bridge creation was expected to fail")
+ }
+}
+
// testInterface is a test double that records the interface data the driver
// programs on an endpoint; it is handed back to the driver as a
// driverapi.InterfaceInfo / InterfaceNameInfo (see Interfaces/InterfaceNames).
type testInterface struct {
	id      int              // interface ID passed to AddInterface
	mac     net.HardwareAddr // MAC address; not recorded by AddInterface in this fixture
	addr    net.IPNet        // IPv4 address assigned by the driver
	addrv6  net.IPNet        // IPv6 address assigned by the driver
	srcName string           // host-side veth name, recorded via SetNames
	dstName string           // sandbox-side name, recorded via SetNames
}
+
// testEndpoint is a test double capturing everything the driver reports back
// through the EndpointInfo/JoinInfo callbacks during a test run.
type testEndpoint struct {
	ifaces         []*testInterface // interfaces added via AddInterface
	gw             net.IP           // IPv4 gateway recorded via SetGateway
	gw6            net.IP           // IPv6 gateway recorded via SetGatewayIPv6
	hostsPath      string           // recorded via SetHostsPath
	resolvConfPath string           // recorded via SetResolvConfPath
}
+
+func (te *testEndpoint) Interfaces() []driverapi.InterfaceInfo {
+ iList := make([]driverapi.InterfaceInfo, len(te.ifaces))
+
+ for i, iface := range te.ifaces {
+ iList[i] = iface
+ }
+
+ return iList
+}
+
+func (te *testEndpoint) AddInterface(id int, mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error {
+ iface := &testInterface{id: id, addr: ipv4, addrv6: ipv6}
+ te.ifaces = append(te.ifaces, iface)
+ return nil
+}
+
// ID returns the interface identifier recorded at AddInterface time.
func (i *testInterface) ID() int {
	return i.id
}
+
// MacAddress returns the stored MAC (zero value in this fixture, since
// AddInterface does not record it).
func (i *testInterface) MacAddress() net.HardwareAddr {
	return i.mac
}
+
// Address returns the IPv4 address the driver assigned to this interface.
func (i *testInterface) Address() net.IPNet {
	return i.addr
}
+
// AddressIPv6 returns the IPv6 address the driver assigned to this interface.
func (i *testInterface) AddressIPv6() net.IPNet {
	return i.addrv6
}
+
// SetNames records the host-side and sandbox-side interface names the driver
// chose for this interface.
func (i *testInterface) SetNames(srcName string, dstName string) error {
	i.srcName = srcName
	i.dstName = dstName
	return nil
}
+
+func (te *testEndpoint) InterfaceNames() []driverapi.InterfaceNameInfo {
+ iList := make([]driverapi.InterfaceNameInfo, len(te.ifaces))
+
+ for i, iface := range te.ifaces {
+ iList[i] = iface
+ }
+
+ return iList
+}
+
// SetGateway records the IPv4 default gateway reported by the driver at Join.
func (te *testEndpoint) SetGateway(gw net.IP) error {
	te.gw = gw
	return nil
}
+
// SetGatewayIPv6 records the IPv6 default gateway reported by the driver at Join.
func (te *testEndpoint) SetGatewayIPv6(gw6 net.IP) error {
	te.gw6 = gw6
	return nil
}
+
// SetHostsPath records the hosts file path reported by the driver.
func (te *testEndpoint) SetHostsPath(path string) error {
	te.hostsPath = path
	return nil
}
+
// SetResolvConfPath records the resolv.conf path reported by the driver.
func (te *testEndpoint) SetResolvConfPath(path string) error {
	te.resolvConfPath = path
	return nil
}
+
// TestQueryEndpointInfo checks endpoint operational data with the userland
// proxy enabled.
func TestQueryEndpointInfo(t *testing.T) {
	testQueryEndpointInfo(t, true)
}
+
// TestQueryEndpointInfoHairpin checks endpoint operational data with the
// userland proxy disabled.
func TestQueryEndpointInfoHairpin(t *testing.T) {
	testQueryEndpointInfo(t, false)
}
+
+func testQueryEndpointInfo(t *testing.T, ulPxyEnabled bool) {
+ defer netutils.SetupTestNetNS(t)()
+ d := newDriver()
+ dd, _ := d.(*driver)
+
+ config := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName,
+ EnableIPTables: true,
+ EnableICC: false,
+ EnableUserlandProxy: ulPxyEnabled,
+ }
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = config
+
+ err := d.CreateNetwork("net1", genericOption)
+ if err != nil {
+ t.Fatalf("Failed to create bridge: %v", err)
+ }
+
+ portMappings := getPortMapping()
+ epOptions := make(map[string]interface{})
+ epOptions[netlabel.PortMap] = portMappings
+
+ te := &testEndpoint{ifaces: []*testInterface{}}
+ err = d.CreateEndpoint("net1", "ep1", te, epOptions)
+ if err != nil {
+ t.Fatalf("Failed to create an endpoint : %s", err.Error())
+ }
+
+ ep, _ := dd.network.endpoints["ep1"]
+ data, err := d.EndpointOperInfo(dd.network.id, ep.id)
+ if err != nil {
+ t.Fatalf("Failed to ask for endpoint operational data: %v", err)
+ }
+ pmd, ok := data[netlabel.PortMap]
+ if !ok {
+ t.Fatalf("Endpoint operational data does not contain port mapping data")
+ }
+ pm, ok := pmd.([]types.PortBinding)
+ if !ok {
+ t.Fatalf("Unexpected format for port mapping in endpoint operational data")
+ }
+ if len(ep.portMapping) != len(pm) {
+ t.Fatalf("Incomplete data for port mapping in endpoint operational data")
+ }
+ for i, pb := range ep.portMapping {
+ if !pb.Equal(&pm[i]) {
+ t.Fatalf("Unexpected data for port mapping in endpoint operational data")
+ }
+ }
+
+ // Cleanup as host ports are there
+ err = releasePorts(ep)
+ if err != nil {
+ t.Fatalf("Failed to release mapped ports: %v", err)
+ }
+}
+
+func TestCreateLinkWithOptions(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+ d := newDriver()
+
+ config := &NetworkConfiguration{BridgeName: DefaultBridgeName}
+ netOptions := make(map[string]interface{})
+ netOptions[netlabel.GenericData] = config
+
+ err := d.CreateNetwork("net1", netOptions)
+ if err != nil {
+ t.Fatalf("Failed to create bridge: %v", err)
+ }
+
+ mac := net.HardwareAddr([]byte{0x1e, 0x67, 0x66, 0x44, 0x55, 0x66})
+ epOptions := make(map[string]interface{})
+ epOptions[netlabel.MacAddress] = mac
+
+ te := &testEndpoint{ifaces: []*testInterface{}}
+ err = d.CreateEndpoint("net1", "ep", te, epOptions)
+ if err != nil {
+ t.Fatalf("Failed to create an endpoint: %s", err.Error())
+ }
+
+ err = d.Join("net1", "ep", "sbox", te, nil)
+ if err != nil {
+ t.Fatalf("Failed to join the endpoint: %v", err)
+ }
+
+ ifaceName := te.ifaces[0].srcName
+ veth, err := netlink.LinkByName(ifaceName)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(mac, veth.Attrs().HardwareAddr) {
+ t.Fatalf("Failed to parse and program endpoint configuration")
+ }
+}
+
+func getExposedPorts() []types.TransportPort {
+ return []types.TransportPort{
+ types.TransportPort{Proto: types.TCP, Port: uint16(5000)},
+ types.TransportPort{Proto: types.UDP, Port: uint16(400)},
+ types.TransportPort{Proto: types.TCP, Port: uint16(600)},
+ }
+}
+
+func getPortMapping() []types.PortBinding {
+ return []types.PortBinding{
+ types.PortBinding{Proto: types.TCP, Port: uint16(230), HostPort: uint16(23000)},
+ types.PortBinding{Proto: types.UDP, Port: uint16(200), HostPort: uint16(22000)},
+ types.PortBinding{Proto: types.TCP, Port: uint16(120), HostPort: uint16(12000)},
+ }
+}
+
+func TestLinkContainers(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ d := newDriver()
+
+ config := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName,
+ EnableIPTables: true,
+ EnableICC: false,
+ }
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = config
+
+ err := d.CreateNetwork("net1", genericOption)
+ if err != nil {
+ t.Fatalf("Failed to create bridge: %v", err)
+ }
+
+ exposedPorts := getExposedPorts()
+ epOptions := make(map[string]interface{})
+ epOptions[netlabel.ExposedPorts] = exposedPorts
+
+ te1 := &testEndpoint{ifaces: []*testInterface{}}
+ err = d.CreateEndpoint("net1", "ep1", te1, epOptions)
+ if err != nil {
+ t.Fatalf("Failed to create an endpoint : %s", err.Error())
+ }
+
+ addr1 := te1.ifaces[0].addr
+ if addr1.IP.To4() == nil {
+ t.Fatalf("No Ipv4 address assigned to the endpoint: ep1")
+ }
+
+ te2 := &testEndpoint{ifaces: []*testInterface{}}
+ err = d.CreateEndpoint("net1", "ep2", te2, nil)
+ if err != nil {
+ t.Fatalf("Failed to create an endpoint : %s", err.Error())
+ }
+
+ addr2 := te2.ifaces[0].addr
+ if addr2.IP.To4() == nil {
+ t.Fatalf("No Ipv4 address assigned to the endpoint: ep2")
+ }
+
+ ce := []string{"ep1"}
+ cConfig := &ContainerConfiguration{ChildEndpoints: ce}
+ genericOption = make(map[string]interface{})
+ genericOption[netlabel.GenericData] = cConfig
+
+ err = d.Join("net1", "ep2", "", te2, genericOption)
+ if err != nil {
+ t.Fatalf("Failed to link ep1 and ep2")
+ }
+
+ out, err := iptables.Raw("-L", DockerChain)
+ for _, pm := range exposedPorts {
+ regex := fmt.Sprintf("%s dpt:%d", pm.Proto.String(), pm.Port)
+ re := regexp.MustCompile(regex)
+ matches := re.FindAllString(string(out[:]), -1)
+ if len(matches) != 1 {
+ t.Fatalf("IP Tables programming failed %s", string(out[:]))
+ }
+
+ regex = fmt.Sprintf("%s spt:%d", pm.Proto.String(), pm.Port)
+ matched, _ := regexp.MatchString(regex, string(out[:]))
+ if !matched {
+ t.Fatalf("IP Tables programming failed %s", string(out[:]))
+ }
+ }
+
+ err = d.Leave("net1", "ep2")
+ if err != nil {
+ t.Fatalf("Failed to unlink ep1 and ep2")
+ }
+
+ out, err = iptables.Raw("-L", DockerChain)
+ for _, pm := range exposedPorts {
+ regex := fmt.Sprintf("%s dpt:%d", pm.Proto.String(), pm.Port)
+ re := regexp.MustCompile(regex)
+ matches := re.FindAllString(string(out[:]), -1)
+ if len(matches) != 0 {
+ t.Fatalf("Leave should have deleted relevant IPTables rules %s", string(out[:]))
+ }
+
+ regex = fmt.Sprintf("%s spt:%d", pm.Proto.String(), pm.Port)
+ matched, _ := regexp.MatchString(regex, string(out[:]))
+ if matched {
+ t.Fatalf("Leave should have deleted relevant IPTables rules %s", string(out[:]))
+ }
+ }
+
+ // Error condition test with an invalid endpoint-id "ep4"
+ ce = []string{"ep1", "ep4"}
+ cConfig = &ContainerConfiguration{ChildEndpoints: ce}
+ genericOption = make(map[string]interface{})
+ genericOption[netlabel.GenericData] = cConfig
+
+ err = d.Join("net1", "ep2", "", te2, genericOption)
+ if err != nil {
+ out, err = iptables.Raw("-L", DockerChain)
+ for _, pm := range exposedPorts {
+ regex := fmt.Sprintf("%s dpt:%d", pm.Proto.String(), pm.Port)
+ re := regexp.MustCompile(regex)
+ matches := re.FindAllString(string(out[:]), -1)
+ if len(matches) != 0 {
+ t.Fatalf("Error handling should rollback relevant IPTables rules %s", string(out[:]))
+ }
+
+ regex = fmt.Sprintf("%s spt:%d", pm.Proto.String(), pm.Port)
+ matched, _ := regexp.MatchString(regex, string(out[:]))
+ if matched {
+ t.Fatalf("Error handling should rollback relevant IPTables rules %s", string(out[:]))
+ }
+ }
+ } else {
+ t.Fatalf("Expected Join to fail given link conditions are not satisfied")
+ }
+}
+
+func TestValidateConfig(t *testing.T) {
+
+ // Test mtu
+ c := NetworkConfiguration{Mtu: -2}
+ err := c.Validate()
+ if err == nil {
+ t.Fatalf("Failed to detect invalid MTU number")
+ }
+
+ c.Mtu = 9000
+ err = c.Validate()
+ if err != nil {
+ t.Fatalf("unexpected validation error on MTU number")
+ }
+
+ // Bridge network
+ _, network, _ := net.ParseCIDR("172.28.0.0/16")
+
+ // Test FixedCIDR
+ _, containerSubnet, _ := net.ParseCIDR("172.27.0.0/16")
+ c = NetworkConfiguration{
+ AddressIPv4: network,
+ FixedCIDR: containerSubnet,
+ }
+
+ err = c.Validate()
+ if err == nil {
+ t.Fatalf("Failed to detect invalid FixedCIDR network")
+ }
+
+ _, containerSubnet, _ = net.ParseCIDR("172.28.0.0/16")
+ c.FixedCIDR = containerSubnet
+ err = c.Validate()
+ if err != nil {
+ t.Fatalf("Unexpected validation error on FixedCIDR network")
+ }
+
+ _, containerSubnet, _ = net.ParseCIDR("172.28.0.0/15")
+ c.FixedCIDR = containerSubnet
+ err = c.Validate()
+ if err == nil {
+ t.Fatalf("Failed to detect invalid FixedCIDR network")
+ }
+
+ _, containerSubnet, _ = net.ParseCIDR("172.28.0.0/17")
+ c.FixedCIDR = containerSubnet
+ err = c.Validate()
+ if err != nil {
+ t.Fatalf("Unexpected validation error on FixedCIDR network")
+ }
+
+ // Test v4 gw
+ c.DefaultGatewayIPv4 = net.ParseIP("172.27.30.234")
+ err = c.Validate()
+ if err == nil {
+ t.Fatalf("Failed to detect invalid default gateway")
+ }
+
+ c.DefaultGatewayIPv4 = net.ParseIP("172.28.30.234")
+ err = c.Validate()
+ if err != nil {
+ t.Fatalf("Unexpected validation error on default gateway")
+ }
+
+ // Test v6 gw
+ _, containerSubnet, _ = net.ParseCIDR("2001:1234:ae:b004::/64")
+ c = NetworkConfiguration{
+ EnableIPv6: true,
+ FixedCIDRv6: containerSubnet,
+ DefaultGatewayIPv6: net.ParseIP("2001:1234:ac:b004::bad:a55"),
+ }
+ err = c.Validate()
+ if err == nil {
+ t.Fatalf("Failed to detect invalid v6 default gateway")
+ }
+
+ c.DefaultGatewayIPv6 = net.ParseIP("2001:1234:ae:b004::bad:a55")
+ err = c.Validate()
+ if err != nil {
+ t.Fatalf("Unexpected validation error on v6 default gateway")
+ }
+
+ c.FixedCIDRv6 = nil
+ err = c.Validate()
+ if err == nil {
+ t.Fatalf("Failed to detect invalid v6 default gateway")
+ }
+}
+
+func TestSetDefaultGw(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+ d := newDriver()
+
+ _, subnetv6, _ := net.ParseCIDR("2001:db8:ea9:9abc:b0c4::/80")
+ gw4 := bridgeNetworks[0].IP.To4()
+ gw4[3] = 254
+ gw6 := net.ParseIP("2001:db8:ea9:9abc:b0c4::254")
+
+ config := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName,
+ EnableIPv6: true,
+ FixedCIDRv6: subnetv6,
+ DefaultGatewayIPv4: gw4,
+ DefaultGatewayIPv6: gw6,
+ }
+
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = config
+
+ err := d.CreateNetwork("dummy", genericOption)
+ if err != nil {
+ t.Fatalf("Failed to create bridge: %v", err)
+ }
+
+ te := &testEndpoint{ifaces: []*testInterface{}}
+ err = d.CreateEndpoint("dummy", "ep", te, nil)
+ if err != nil {
+ t.Fatalf("Failed to create endpoint: %v", err)
+ }
+
+ err = d.Join("dummy", "ep", "sbox", te, nil)
+ if err != nil {
+ t.Fatalf("Failed to join endpoint: %v", err)
+ }
+
+ if !gw4.Equal(te.gw) {
+ t.Fatalf("Failed to configure default gateway. Expected %v. Found %v", gw4, te.gw)
+ }
+
+ if !gw6.Equal(te.gw6) {
+ t.Fatalf("Failed to configure default gateway. Expected %v. Found %v", gw6, te.gw6)
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/errors.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/errors.go
new file mode 100644
index 0000000000..d22912c5c7
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/errors.go
@@ -0,0 +1,341 @@
+package bridge
+
+import (
+ "fmt"
+ "net"
+)
+
+// ErrConfigExists error is returned when driver already has a config applied.
+type ErrConfigExists struct{}
+
+func (ece *ErrConfigExists) Error() string {
+ return "configuration already exists, bridge configuration can be applied only once"
+}
+
+// Forbidden denotes the type of this error
+func (ece *ErrConfigExists) Forbidden() {}
+
+// ErrInvalidDriverConfig error is returned when Bridge Driver is passed an invalid config
+type ErrInvalidDriverConfig struct{}
+
+func (eidc *ErrInvalidDriverConfig) Error() string {
+ return "Invalid configuration passed to Bridge Driver"
+}
+
+// BadRequest denotes the type of this error
+func (eidc *ErrInvalidDriverConfig) BadRequest() {}
+
+// ErrInvalidNetworkConfig error is returned when a network is created on a driver without valid config.
+type ErrInvalidNetworkConfig struct{}
+
+func (einc *ErrInvalidNetworkConfig) Error() string {
+ return "trying to create a network on a driver without valid config"
+}
+
+// Forbidden denotes the type of this error
+func (einc *ErrInvalidNetworkConfig) Forbidden() {}
+
+// ErrInvalidContainerConfig error is returned when a endpoint create is attempted with an invalid configuration.
+type ErrInvalidContainerConfig struct{}
+
+func (eicc *ErrInvalidContainerConfig) Error() string {
+ return "Error in joining a container due to invalid configuration"
+}
+
+// BadRequest denotes the type of this error
+func (eicc *ErrInvalidContainerConfig) BadRequest() {}
+
+// ErrInvalidEndpointConfig error is returned when a endpoint create is attempted with an invalid endpoint configuration.
+type ErrInvalidEndpointConfig struct{}
+
+func (eiec *ErrInvalidEndpointConfig) Error() string {
+ return "trying to create an endpoint with an invalid endpoint configuration"
+}
+
+// BadRequest denotes the type of this error
+func (eiec *ErrInvalidEndpointConfig) BadRequest() {}
+
+// ErrNetworkExists error is returned when a network already exists and another network is created.
+type ErrNetworkExists struct{}
+
+func (ene *ErrNetworkExists) Error() string {
+ return "network already exists, bridge can only have one network"
+}
+
+// Forbidden denotes the type of this error
+func (ene *ErrNetworkExists) Forbidden() {}
+
+// ErrIfaceName error is returned when a new name could not be generated.
+type ErrIfaceName struct{}
+
+func (ein *ErrIfaceName) Error() string {
+ return "failed to find name for new interface"
+}
+
+// InternalError denotes the type of this error
+func (ein *ErrIfaceName) InternalError() {}
+
+// ErrNoIPAddr error is returned when bridge has no IPv4 address configured.
+type ErrNoIPAddr struct{}
+
+func (enip *ErrNoIPAddr) Error() string {
+ return "bridge has no IPv4 address configured"
+}
+
+// InternalError denotes the type of this error
+func (enip *ErrNoIPAddr) InternalError() {}
+
+// ErrInvalidGateway is returned when the user provided default gateway (v4/v6) is not valid.
+type ErrInvalidGateway struct{}
+
+func (eig *ErrInvalidGateway) Error() string {
+ return "default gateway ip must be part of the network"
+}
+
+// BadRequest denotes the type of this error
+func (eig *ErrInvalidGateway) BadRequest() {}
+
+// ErrInvalidContainerSubnet is returned when the container subnet (FixedCIDR) is not valid.
+type ErrInvalidContainerSubnet struct{}
+
+func (eis *ErrInvalidContainerSubnet) Error() string {
+ return "container subnet must be a subset of bridge network"
+}
+
+// BadRequest denotes the type of this error
+func (eis *ErrInvalidContainerSubnet) BadRequest() {}
+
+// ErrInvalidMtu is returned when the user provided MTU is not valid.
+type ErrInvalidMtu int
+
+func (eim ErrInvalidMtu) Error() string {
+ return fmt.Sprintf("invalid MTU number: %d", int(eim))
+}
+
+// BadRequest denotes the type of this error
+func (eim ErrInvalidMtu) BadRequest() {}
+
+// ErrIPFwdCfg is returned when ip forwarding setup is invoked when the configuration
+// is not enabled.
+type ErrIPFwdCfg struct{}
+
+func (eipf *ErrIPFwdCfg) Error() string {
+ return "unexpected request to enable IP Forwarding"
+}
+
+// BadRequest denotes the type of this error
+func (eipf *ErrIPFwdCfg) BadRequest() {}
+
+// ErrInvalidPort is returned when the container or host port specified in the port binding is not valid.
+type ErrInvalidPort string
+
+func (ip ErrInvalidPort) Error() string {
+ return fmt.Sprintf("invalid transport port: %s", string(ip))
+}
+
+// BadRequest denotes the type of this error
+func (ip ErrInvalidPort) BadRequest() {}
+
+// ErrUnsupportedAddressType is returned when the specified address type is not supported.
+type ErrUnsupportedAddressType string
+
+func (uat ErrUnsupportedAddressType) Error() string {
+ return fmt.Sprintf("unsupported address type: %s", string(uat))
+}
+
+// BadRequest denotes the type of this error
+func (uat ErrUnsupportedAddressType) BadRequest() {}
+
+// ErrInvalidAddressBinding is returned when the host address specified in the port binding is not valid.
+type ErrInvalidAddressBinding string
+
+func (iab ErrInvalidAddressBinding) Error() string {
+ return fmt.Sprintf("invalid host address in port binding: %s", string(iab))
+}
+
+// BadRequest denotes the type of this error
+func (iab ErrInvalidAddressBinding) BadRequest() {}
+
+// ActiveEndpointsError is returned when there are
+// still active endpoints in the network being deleted.
+type ActiveEndpointsError string
+
+func (aee ActiveEndpointsError) Error() string {
+ return fmt.Sprintf("network %s has active endpoint", string(aee))
+}
+
+// Forbidden denotes the type of this error
+func (aee ActiveEndpointsError) Forbidden() {}
+
+// InvalidNetworkIDError is returned when the passed
+// network id for an existing network is not a known id.
+type InvalidNetworkIDError string
+
+func (inie InvalidNetworkIDError) Error() string {
+ return fmt.Sprintf("invalid network id %s", string(inie))
+}
+
+// NotFound denotes the type of this error
+func (inie InvalidNetworkIDError) NotFound() {}
+
+// InvalidEndpointIDError is returned when the passed
+// endpoint id is not valid.
+type InvalidEndpointIDError string
+
+func (ieie InvalidEndpointIDError) Error() string {
+ return fmt.Sprintf("invalid endpoint id: %s", string(ieie))
+}
+
+// BadRequest denotes the type of this error
+func (ieie InvalidEndpointIDError) BadRequest() {}
+
+// InvalidSandboxIDError is returned when the passed
+// sandbox id is not valid.
+type InvalidSandboxIDError string
+
+func (isie InvalidSandboxIDError) Error() string {
+ return fmt.Sprintf("invalid sanbox id: %s", string(isie))
+}
+
+// BadRequest denotes the type of this error
+func (isie InvalidSandboxIDError) BadRequest() {}
+
+// EndpointNotFoundError is returned when the no endpoint
+// with the passed endpoint id is found.
+type EndpointNotFoundError string
+
+func (enfe EndpointNotFoundError) Error() string {
+ return fmt.Sprintf("endpoint not found: %s", string(enfe))
+}
+
+// NotFound denotes the type of this error
+func (enfe EndpointNotFoundError) NotFound() {}
+
+// NonDefaultBridgeExistError is returned when a non-default
+// bridge config is passed but it does not already exist.
+type NonDefaultBridgeExistError string
+
+func (ndbee NonDefaultBridgeExistError) Error() string {
+ return fmt.Sprintf("bridge device with non default name %s must be created manually", string(ndbee))
+}
+
+// Forbidden denotes the type of this error
+func (ndbee NonDefaultBridgeExistError) Forbidden() {}
+
+// FixedCIDRv4Error is returned when fixed-cidrv4 configuration
+// failed.
+type FixedCIDRv4Error struct {
+ Net *net.IPNet
+ Subnet *net.IPNet
+ Err error
+}
+
+func (fcv4 *FixedCIDRv4Error) Error() string {
+ return fmt.Sprintf("setup FixedCIDRv4 failed for subnet %s in %s: %v", fcv4.Subnet, fcv4.Net, fcv4.Err)
+}
+
+// InternalError denotes the type of this error
+func (fcv4 *FixedCIDRv4Error) InternalError() {}
+
+// FixedCIDRv6Error is returned when fixed-cidrv6 configuration
+// failed.
+type FixedCIDRv6Error struct {
+ Net *net.IPNet
+ Err error
+}
+
+func (fcv6 *FixedCIDRv6Error) Error() string {
+ return fmt.Sprintf("setup FixedCIDRv6 failed for subnet %s in %s: %v", fcv6.Net, fcv6.Net, fcv6.Err)
+}
+
+// InternalError denotes the type of this error
+func (fcv6 *FixedCIDRv6Error) InternalError() {}
+
+// IPTableCfgError is returned when an unexpected ip tables configuration is entered
+type IPTableCfgError string
+
+func (name IPTableCfgError) Error() string {
+ return fmt.Sprintf("unexpected request to set IP tables for interface: %s", string(name))
+}
+
+// BadRequest denotes the type of this error
+func (name IPTableCfgError) BadRequest() {}
+
+// InvalidIPTablesCfgError is returned when an invalid ip tables configuration is entered
+type InvalidIPTablesCfgError string
+
+func (action InvalidIPTablesCfgError) Error() string {
+ return fmt.Sprintf("Invalid IPTables action '%s'", string(action))
+}
+
+// BadRequest denotes the type of this error
+func (action InvalidIPTablesCfgError) BadRequest() {}
+
+// IPv4AddrRangeError is returned when a valid IP address range couldn't be found.
+type IPv4AddrRangeError string
+
+func (name IPv4AddrRangeError) Error() string {
+ return fmt.Sprintf("can't find an address range for interface %q", string(name))
+}
+
+// BadRequest denotes the type of this error
+func (name IPv4AddrRangeError) BadRequest() {}
+
+// IPv4AddrAddError is returned when IPv4 address could not be added to the bridge.
+type IPv4AddrAddError struct {
+ IP *net.IPNet
+ Err error
+}
+
+func (ipv4 *IPv4AddrAddError) Error() string {
+ return fmt.Sprintf("failed to add IPv4 address %s to bridge: %v", ipv4.IP, ipv4.Err)
+}
+
+// InternalError denotes the type of this error
+func (ipv4 *IPv4AddrAddError) InternalError() {}
+
+// IPv6AddrAddError is returned when IPv6 address could not be added to the bridge.
+type IPv6AddrAddError struct {
+ IP *net.IPNet
+ Err error
+}
+
+func (ipv6 *IPv6AddrAddError) Error() string {
+ return fmt.Sprintf("failed to add IPv6 address %s to bridge: %v", ipv6.IP, ipv6.Err)
+}
+
+// InternalError denotes the type of this error
+func (ipv6 *IPv6AddrAddError) InternalError() {}
+
+// IPv4AddrNoMatchError is returned when the bridge's IPv4 address does not match configured.
+type IPv4AddrNoMatchError struct {
+ IP net.IP
+ CfgIP net.IP
+}
+
+func (ipv4 *IPv4AddrNoMatchError) Error() string {
+ return fmt.Sprintf("bridge IPv4 (%s) does not match requested configuration %s", ipv4.IP, ipv4.CfgIP)
+}
+
+// BadRequest denotes the type of this error
+func (ipv4 *IPv4AddrNoMatchError) BadRequest() {}
+
+// IPv6AddrNoMatchError is returned when the bridge's IPv6 address does not match configured.
+type IPv6AddrNoMatchError net.IPNet
+
+func (ipv6 *IPv6AddrNoMatchError) Error() string {
+ return fmt.Sprintf("bridge IPv6 addresses do not match the expected bridge configuration %s", (*net.IPNet)(ipv6).String())
+}
+
+// BadRequest denotes the type of this error
+func (ipv6 *IPv6AddrNoMatchError) BadRequest() {}
+
+// InvalidLinkIPAddrError is returned when a link is configured to a container with an invalid ip address
+type InvalidLinkIPAddrError string
+
+func (address InvalidLinkIPAddrError) Error() string {
+ return fmt.Sprintf("Cannot link to a container with Invalid IP Address '%s'", string(address))
+}
+
+// BadRequest denotes the type of this error
+func (address InvalidLinkIPAddrError) BadRequest() {}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface.go
new file mode 100644
index 0000000000..215a7f48f2
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface.go
@@ -0,0 +1,63 @@
+package bridge
+
+import (
+ "net"
+
+ "github.com/vishvananda/netlink"
+)
+
+const (
+ // DefaultBridgeName is the default name for the bridge interface managed
+ // by the driver when unspecified by the caller.
+ DefaultBridgeName = "docker0"
+)
+
+// Interface models the bridge network device.
+type bridgeInterface struct {
+ Link netlink.Link
+ bridgeIPv4 *net.IPNet
+ bridgeIPv6 *net.IPNet
+ gatewayIPv4 net.IP
+ gatewayIPv6 net.IP
+}
+
+// newInterface creates a new bridge interface structure. It attempts to find
+// an already existing device identified by the Configuration BridgeName field
+// (or the default bridge name when unspecified), but doesn't attempt to create
+// one when missing.
+func newInterface(config *NetworkConfiguration) *bridgeInterface {
+ i := &bridgeInterface{}
+
+ // Initialize the bridge name to the default if unspecified.
+ if config.BridgeName == "" {
+ config.BridgeName = DefaultBridgeName
+ }
+
+ // Attempt to find an existing bridge named with the specified name.
+ i.Link, _ = netlink.LinkByName(config.BridgeName)
+ return i
+}
+
+// exists indicates if the existing bridge interface exists on the system.
+func (i *bridgeInterface) exists() bool {
+ return i.Link != nil
+}
+
+// addresses returns a single IPv4 address and all IPv6 addresses for the
+// bridge interface.
+func (i *bridgeInterface) addresses() (netlink.Addr, []netlink.Addr, error) {
+ v4addr, err := netlink.AddrList(i.Link, netlink.FAMILY_V4)
+ if err != nil {
+ return netlink.Addr{}, nil, err
+ }
+
+ v6addr, err := netlink.AddrList(i.Link, netlink.FAMILY_V6)
+ if err != nil {
+ return netlink.Addr{}, nil, err
+ }
+
+ if len(v4addr) == 0 {
+ return netlink.Addr{}, v6addr, nil
+ }
+ return v4addr[0], v6addr, nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface_test.go
new file mode 100644
index 0000000000..07bfe9eed9
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface_test.go
@@ -0,0 +1,33 @@
+package bridge
+
+import (
+ "testing"
+
+ "github.com/docker/libnetwork/netutils"
+ "github.com/vishvananda/netlink"
+)
+
+func TestInterfaceDefaultName(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ config := &NetworkConfiguration{}
+ if _ = newInterface(config); config.BridgeName != DefaultBridgeName {
+ t.Fatalf("Expected default interface name %q, got %q", DefaultBridgeName, config.BridgeName)
+ }
+}
+
+func TestAddressesEmptyInterface(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ inf := newInterface(&NetworkConfiguration{})
+ addrv4, addrsv6, err := inf.addresses()
+ if err != nil {
+ t.Fatalf("Failed to get addresses of default interface: %v", err)
+ }
+ if expected := (netlink.Addr{}); addrv4 != expected {
+ t.Fatalf("Default interface has unexpected IPv4: %s", addrv4)
+ }
+ if len(addrsv6) != 0 {
+ t.Fatalf("Default interface has unexpected IPv6: %v", addrsv6)
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/link.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/link.go
new file mode 100644
index 0000000000..4e4444e074
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/link.go
@@ -0,0 +1,80 @@
+package bridge
+
+import (
+ "fmt"
+ "net"
+
+ log "github.com/Sirupsen/logrus"
+ "github.com/docker/libnetwork/iptables"
+ "github.com/docker/libnetwork/types"
+)
+
+type link struct {
+ parentIP string
+ childIP string
+ ports []types.TransportPort
+ bridge string
+}
+
+func (l *link) String() string {
+ return fmt.Sprintf("%s <-> %s [%v] on %s", l.parentIP, l.childIP, l.ports, l.bridge)
+}
+
+func newLink(parentIP, childIP string, ports []types.TransportPort, bridge string) *link {
+ return &link{
+ childIP: childIP,
+ parentIP: parentIP,
+ ports: ports,
+ bridge: bridge,
+ }
+
+}
+
+func (l *link) Enable() error {
+ // -A == iptables append flag
+ return linkContainers("-A", l.parentIP, l.childIP, l.ports, l.bridge, false)
+}
+
+func (l *link) Disable() {
+ // -D == iptables delete flag
+ err := linkContainers("-D", l.parentIP, l.childIP, l.ports, l.bridge, true)
+ if err != nil {
+ log.Errorf("Error removing IPTables rules for a link %s due to %s", l.String(), err.Error())
+ }
+ // Return proper error once we move to use a proper iptables package
+ // that returns typed errors
+}
+
+func linkContainers(action, parentIP, childIP string, ports []types.TransportPort, bridge string,
+ ignoreErrors bool) error {
+ var nfAction iptables.Action
+
+ switch action {
+ case "-A":
+ nfAction = iptables.Append
+ case "-I":
+ nfAction = iptables.Insert
+ case "-D":
+ nfAction = iptables.Delete
+ default:
+ return InvalidIPTablesCfgError(action)
+ }
+
+ ip1 := net.ParseIP(parentIP)
+ if ip1 == nil {
+ return InvalidLinkIPAddrError(parentIP)
+ }
+ ip2 := net.ParseIP(childIP)
+ if ip2 == nil {
+ return InvalidLinkIPAddrError(childIP)
+ }
+
+ chain := iptables.Chain{Name: DockerChain, Bridge: bridge}
+ for _, port := range ports {
+ err := chain.Link(nfAction, ip1, ip2, int(port.Port), port.Proto.String())
+ if !ignoreErrors && err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/link_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/link_test.go
new file mode 100644
index 0000000000..fc4a6251f3
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/link_test.go
@@ -0,0 +1,39 @@
+package bridge
+
+import (
+ "testing"
+
+ "github.com/docker/libnetwork/types"
+)
+
+func getPorts() []types.TransportPort {
+ return []types.TransportPort{
+ types.TransportPort{Proto: types.TCP, Port: uint16(5000)},
+ types.TransportPort{Proto: types.UDP, Port: uint16(400)},
+ types.TransportPort{Proto: types.TCP, Port: uint16(600)},
+ }
+}
+
+func TestLinkNew(t *testing.T) {
+ ports := getPorts()
+
+ link := newLink("172.0.17.3", "172.0.17.2", ports, "docker0")
+
+ if link == nil {
+ t.FailNow()
+ }
+ if link.parentIP != "172.0.17.3" {
+ t.Fail()
+ }
+ if link.childIP != "172.0.17.2" {
+ t.Fail()
+ }
+ for i, p := range link.ports {
+ if p != ports[i] {
+ t.Fail()
+ }
+ }
+ if link.bridge != "docker0" {
+ t.Fail()
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/network_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/network_test.go
new file mode 100644
index 0000000000..20afea90ad
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/network_test.go
@@ -0,0 +1,200 @@
+package bridge
+
+import (
+ "testing"
+
+ "github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/netlabel"
+ "github.com/docker/libnetwork/netutils"
+ "github.com/vishvananda/netlink"
+)
+
+func TestLinkCreate(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+ d := newDriver()
+ dr := d.(*driver)
+
+ mtu := 1490
+ config := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName,
+ Mtu: mtu,
+ EnableIPv6: true,
+ }
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = config
+
+ err := d.CreateNetwork("dummy", genericOption)
+ if err != nil {
+ t.Fatalf("Failed to create bridge: %v", err)
+ }
+
+ te := &testEndpoint{ifaces: []*testInterface{}}
+ err = d.CreateEndpoint("dummy", "", te, nil)
+ if err != nil {
+ if _, ok := err.(InvalidEndpointIDError); !ok {
+ t.Fatalf("Failed with a wrong error :%s", err.Error())
+ }
+ } else {
+ t.Fatalf("Failed to detect invalid config")
+ }
+
+ // Good endpoint creation
+ err = d.CreateEndpoint("dummy", "ep", te, nil)
+ if err != nil {
+ t.Fatalf("Failed to create a link: %s", err.Error())
+ }
+
+ err = d.Join("dummy", "ep", "sbox", te, nil)
+ if err != nil {
+ t.Fatalf("Failed to create a link: %s", err.Error())
+ }
+
+	// Verify sbox endpoint interface inherited MTU value from bridge config
+ sboxLnk, err := netlink.LinkByName(te.ifaces[0].srcName)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if mtu != sboxLnk.Attrs().MTU {
+ t.Fatalf("Sandbox endpoint interface did not inherit bridge interface MTU config")
+ }
+ // TODO: if we could get peer name from (sboxLnk.(*netlink.Veth)).PeerName
+ // then we could check the MTU on hostLnk as well.
+
+ te1 := &testEndpoint{ifaces: []*testInterface{}}
+ err = d.CreateEndpoint("dummy", "ep", te1, nil)
+ if err == nil {
+ t.Fatalf("Failed to detect duplicate endpoint id on same network")
+ }
+
+ if len(te.ifaces) != 1 {
+ t.Fatalf("Expected exactly one interface. Instead got %d interface(s)", len(te.ifaces))
+ }
+
+ if te.ifaces[0].dstName == "" {
+ t.Fatal("Invalid Dstname returned")
+ }
+
+ _, err = netlink.LinkByName(te.ifaces[0].srcName)
+ if err != nil {
+ t.Fatalf("Could not find source link %s: %v", te.ifaces[0].srcName, err)
+ }
+
+ n := dr.network
+ ip := te.ifaces[0].addr.IP
+ if !n.bridge.bridgeIPv4.Contains(ip) {
+ t.Fatalf("IP %s is not a valid ip in the subnet %s", ip.String(), n.bridge.bridgeIPv4.String())
+ }
+
+ ip6 := te.ifaces[0].addrv6.IP
+ if !n.bridge.bridgeIPv6.Contains(ip6) {
+ t.Fatalf("IP %s is not a valid ip in the subnet %s", ip6.String(), bridgeIPv6.String())
+ }
+
+ if !te.gw.Equal(n.bridge.bridgeIPv4.IP) {
+ t.Fatalf("Invalid default gateway. Expected %s. Got %s", n.bridge.bridgeIPv4.IP.String(),
+ te.gw.String())
+ }
+
+ if !te.gw6.Equal(n.bridge.bridgeIPv6.IP) {
+ t.Fatalf("Invalid default gateway for IPv6. Expected %s. Got %s", n.bridge.bridgeIPv6.IP.String(),
+ te.gw6.String())
+ }
+}
+
+func TestLinkCreateTwo(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+ d := newDriver()
+
+ config := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName,
+ EnableIPv6: true}
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = config
+
+ err := d.CreateNetwork("dummy", genericOption)
+ if err != nil {
+ t.Fatalf("Failed to create bridge: %v", err)
+ }
+
+ te1 := &testEndpoint{ifaces: []*testInterface{}}
+ err = d.CreateEndpoint("dummy", "ep", te1, nil)
+ if err != nil {
+ t.Fatalf("Failed to create a link: %s", err.Error())
+ }
+
+ te2 := &testEndpoint{ifaces: []*testInterface{}}
+ err = d.CreateEndpoint("dummy", "ep", te2, nil)
+ if err != nil {
+ if _, ok := err.(driverapi.ErrEndpointExists); !ok {
+ t.Fatalf("Failed with a wrong error: %s", err.Error())
+ }
+ } else {
+ t.Fatalf("Expected to fail while trying to add same endpoint twice")
+ }
+}
+
+func TestLinkCreateNoEnableIPv6(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+ d := newDriver()
+
+ config := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName}
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = config
+
+ err := d.CreateNetwork("dummy", genericOption)
+ if err != nil {
+ t.Fatalf("Failed to create bridge: %v", err)
+ }
+
+ te := &testEndpoint{ifaces: []*testInterface{}}
+ err = d.CreateEndpoint("dummy", "ep", te, nil)
+ if err != nil {
+ t.Fatalf("Failed to create a link: %s", err.Error())
+ }
+
+ interfaces := te.ifaces
+ if interfaces[0].addrv6.IP.To16() != nil {
+ t.Fatalf("Expectd IPv6 address to be nil when IPv6 is not enabled. Got IPv6 = %s", interfaces[0].addrv6.String())
+ }
+
+ if te.gw6.To16() != nil {
+ t.Fatalf("Expected GatewayIPv6 to be nil when IPv6 is not enabled. Got GatewayIPv6 = %s", te.gw6.String())
+ }
+}
+
+func TestLinkDelete(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+ d := newDriver()
+
+ config := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName,
+ EnableIPv6: true}
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = config
+
+ err := d.CreateNetwork("dummy", genericOption)
+ if err != nil {
+ t.Fatalf("Failed to create bridge: %v", err)
+ }
+
+ te := &testEndpoint{ifaces: []*testInterface{}}
+ err = d.CreateEndpoint("dummy", "ep1", te, nil)
+ if err != nil {
+ t.Fatalf("Failed to create a link: %s", err.Error())
+ }
+
+ err = d.DeleteEndpoint("dummy", "")
+ if err != nil {
+ if _, ok := err.(InvalidEndpointIDError); !ok {
+ t.Fatalf("Failed with a wrong error :%s", err.Error())
+ }
+ } else {
+ t.Fatalf("Failed to detect invalid config")
+ }
+
+ err = d.DeleteEndpoint("dummy", "ep1")
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/port_mapping.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/port_mapping.go
new file mode 100644
index 0000000000..52d036241c
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/port_mapping.go
@@ -0,0 +1,124 @@
+package bridge
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/libnetwork/sandbox"
+ "github.com/docker/libnetwork/types"
+)
+
+var (
+ defaultBindingIP = net.IPv4(0, 0, 0, 0)
+)
+
+func allocatePorts(epConfig *EndpointConfiguration, intf *sandbox.Interface, reqDefBindIP net.IP, ulPxyEnabled bool) ([]types.PortBinding, error) {
+ if epConfig == nil || epConfig.PortBindings == nil {
+ return nil, nil
+ }
+
+ defHostIP := defaultBindingIP
+ if reqDefBindIP != nil {
+ defHostIP = reqDefBindIP
+ }
+
+ return allocatePortsInternal(epConfig.PortBindings, intf.Address.IP, defHostIP, ulPxyEnabled)
+}
+
+func allocatePortsInternal(bindings []types.PortBinding, containerIP, defHostIP net.IP, ulPxyEnabled bool) ([]types.PortBinding, error) {
+ bs := make([]types.PortBinding, 0, len(bindings))
+ for _, c := range bindings {
+ b := c.GetCopy()
+ if err := allocatePort(&b, containerIP, defHostIP, ulPxyEnabled); err != nil {
+ // On allocation failure, release previously allocated ports. On cleanup error, just log a warning message
+ if cuErr := releasePortsInternal(bs); cuErr != nil {
+ logrus.Warnf("Upon allocation failure for %v, failed to clear previously allocated port bindings: %v", b, cuErr)
+ }
+ return nil, err
+ }
+ bs = append(bs, b)
+ }
+ return bs, nil
+}
+
+func allocatePort(bnd *types.PortBinding, containerIP, defHostIP net.IP, ulPxyEnabled bool) error {
+ var (
+ host net.Addr
+ err error
+ )
+
+ // Store the container interface address in the operational binding
+ bnd.IP = containerIP
+
+ // Adjust the host address in the operational binding
+ if len(bnd.HostIP) == 0 {
+ bnd.HostIP = defHostIP
+ }
+
+ // Construct the container side transport address
+ container, err := bnd.ContainerAddr()
+ if err != nil {
+ return err
+ }
+
+ // Try up to maxAllocatePortAttempts times to get a port that's not already allocated.
+ for i := 0; i < maxAllocatePortAttempts; i++ {
+ if host, err = portMapper.Map(container, bnd.HostIP, int(bnd.HostPort), ulPxyEnabled); err == nil {
+ break
+ }
+ // There is no point in immediately retrying to map an explicitly chosen port.
+ if bnd.HostPort != 0 {
+ logrus.Warnf("Failed to allocate and map port %d: %s", bnd.HostPort, err)
+ break
+ }
+ logrus.Warnf("Failed to allocate and map port: %s, retry: %d", err, i+1)
+ }
+ if err != nil {
+ return err
+ }
+
+ // Save the host port (regardless it was or not specified in the binding)
+ switch netAddr := host.(type) {
+ case *net.TCPAddr:
+ bnd.HostPort = uint16(host.(*net.TCPAddr).Port)
+ return nil
+ case *net.UDPAddr:
+ bnd.HostPort = uint16(host.(*net.UDPAddr).Port)
+ return nil
+ default:
+ // For completeness
+ return ErrUnsupportedAddressType(fmt.Sprintf("%T", netAddr))
+ }
+}
+
+func releasePorts(ep *bridgeEndpoint) error {
+ return releasePortsInternal(ep.portMapping)
+}
+
+func releasePortsInternal(bindings []types.PortBinding) error {
+ var errorBuf bytes.Buffer
+
+ // Attempt to release all port bindings, do not stop on failure
+ for _, m := range bindings {
+ if err := releasePort(m); err != nil {
+ errorBuf.WriteString(fmt.Sprintf("\ncould not release %v because of %v", m, err))
+ }
+ }
+
+ if errorBuf.Len() != 0 {
+ return errors.New(errorBuf.String())
+ }
+ return nil
+}
+
+func releasePort(bnd types.PortBinding) error {
+ // Construct the host side transport address
+ host, err := bnd.HostAddr()
+ if err != nil {
+ return err
+ }
+ return portMapper.Unmap(host)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/port_mapping_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/port_mapping_test.go
new file mode 100644
index 0000000000..5eb16b6de8
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/port_mapping_test.go
@@ -0,0 +1,67 @@
+package bridge
+
+import (
+ "os"
+ "testing"
+
+ "github.com/docker/docker/pkg/reexec"
+ "github.com/docker/libnetwork/netlabel"
+ "github.com/docker/libnetwork/netutils"
+ "github.com/docker/libnetwork/types"
+)
+
+func TestMain(m *testing.M) {
+ if reexec.Init() {
+ return
+ }
+ os.Exit(m.Run())
+}
+
+func TestPortMappingConfig(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+ d := newDriver()
+
+ binding1 := types.PortBinding{Proto: types.UDP, Port: uint16(400), HostPort: uint16(54000)}
+ binding2 := types.PortBinding{Proto: types.TCP, Port: uint16(500), HostPort: uint16(65000)}
+ portBindings := []types.PortBinding{binding1, binding2}
+
+ epOptions := make(map[string]interface{})
+ epOptions[netlabel.PortMap] = portBindings
+
+ netConfig := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName,
+ EnableIPTables: true,
+ }
+ netOptions := make(map[string]interface{})
+ netOptions[netlabel.GenericData] = netConfig
+
+ err := d.CreateNetwork("dummy", netOptions)
+ if err != nil {
+ t.Fatalf("Failed to create bridge: %v", err)
+ }
+
+ te := &testEndpoint{ifaces: []*testInterface{}}
+ err = d.CreateEndpoint("dummy", "ep1", te, epOptions)
+ if err != nil {
+ t.Fatalf("Failed to create the endpoint: %s", err.Error())
+ }
+
+ dd := d.(*driver)
+ ep, _ := dd.network.endpoints["ep1"]
+ if len(ep.portMapping) != 2 {
+ t.Fatalf("Failed to store the port bindings into the sandbox info. Found: %v", ep.portMapping)
+ }
+ if ep.portMapping[0].Proto != binding1.Proto || ep.portMapping[0].Port != binding1.Port ||
+ ep.portMapping[1].Proto != binding2.Proto || ep.portMapping[1].Port != binding2.Port {
+ t.Fatalf("bridgeEndpoint has incorrect port mapping values")
+ }
+ if ep.portMapping[0].HostIP == nil || ep.portMapping[0].HostPort == 0 ||
+ ep.portMapping[1].HostIP == nil || ep.portMapping[1].HostPort == 0 {
+ t.Fatalf("operational port mapping data not found on bridgeEndpoint")
+ }
+
+ err = releasePorts(ep)
+ if err != nil {
+ t.Fatalf("Failed to release mapped ports: %v", err)
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/resolvconf.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/resolvconf.go
new file mode 100644
index 0000000000..8861184220
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/resolvconf.go
@@ -0,0 +1,67 @@
+package bridge
+
+import (
+ "bytes"
+ "io/ioutil"
+ "regexp"
+)
+
+const (
+ ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)`
+ ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock
+
+ // This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also
+ // will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants
+ // -- e.g. other link-local types -- either won't work in containers or are unnecessary.
+ // For readability and sufficiency for Docker purposes this seemed more reasonable than a
+ // 1000+ character regexp with exact and complete IPv6 validation
+ ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})`
+)
+
+var nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`)
+
+func readResolvConf() ([]byte, error) {
+ resolv, err := ioutil.ReadFile("/etc/resolv.conf")
+ if err != nil {
+ return nil, err
+ }
+ return resolv, nil
+}
+
+// getLines parses input into lines and strips away comments.
+func getLines(input []byte, commentMarker []byte) [][]byte {
+ lines := bytes.Split(input, []byte("\n"))
+ var output [][]byte
+ for _, currentLine := range lines {
+ var commentIndex = bytes.Index(currentLine, commentMarker)
+ if commentIndex == -1 {
+ output = append(output, currentLine)
+ } else {
+ output = append(output, currentLine[:commentIndex])
+ }
+ }
+ return output
+}
+
+// GetNameserversAsCIDR returns nameservers (if any) listed in
+// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
+// This function's output is intended for net.ParseCIDR
+func getNameserversAsCIDR(resolvConf []byte) []string {
+ nameservers := []string{}
+ for _, nameserver := range getNameservers(resolvConf) {
+ nameservers = append(nameservers, nameserver+"/32")
+ }
+ return nameservers
+}
+
+// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf
+func getNameservers(resolvConf []byte) []string {
+ nameservers := []string{}
+ for _, line := range getLines(resolvConf, []byte("#")) {
+ var ns = nsRegexp.FindSubmatch(line)
+ if len(ns) > 0 {
+ nameservers = append(nameservers, string(ns[1]))
+ }
+ }
+ return nameservers
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/resolvconf_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/resolvconf_test.go
new file mode 100644
index 0000000000..029f41c78c
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/resolvconf_test.go
@@ -0,0 +1,53 @@
+package bridge
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestResolveConfRead(t *testing.T) {
+ b, err := readResolvConf()
+ if err != nil {
+ t.Fatalf("Failed to read resolv.conf: %v", err)
+ }
+
+ if b == nil {
+ t.Fatal("Reading resolv.conf returned no content")
+ }
+}
+
+func TestResolveConfReadLines(t *testing.T) {
+ commentChar := []byte("#")
+
+ b, _ := readResolvConf()
+ lines := getLines(b, commentChar)
+ if lines == nil {
+ t.Fatal("Failed to read resolv.conf lines")
+ }
+
+ for _, line := range lines {
+ if bytes.Index(line, commentChar) != -1 {
+ t.Fatal("Returned comment content from resolv.conf")
+ }
+ }
+}
+
+func TestResolvConfNameserversAsCIDR(t *testing.T) {
+ resolvConf := `# Commented line
+nameserver 1.2.3.4
+
+nameserver 5.6.7.8 # Test
+`
+
+ cidrs := getNameserversAsCIDR([]byte(resolvConf))
+ if expected := 2; len(cidrs) != expected {
+ t.Fatalf("Expected %d nameservers, got %d", expected, len(cidrs))
+ }
+
+ expected := []string{"1.2.3.4/32", "5.6.7.8/32"}
+ for i, exp := range expected {
+ if cidrs[i] != exp {
+ t.Fatalf("Expected nameservers %s, got %s", exp, cidrs[i])
+ }
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup.go
new file mode 100644
index 0000000000..f2d0344839
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup.go
@@ -0,0 +1,26 @@
+package bridge
+
+type setupStep func(*NetworkConfiguration, *bridgeInterface) error
+
+type bridgeSetup struct {
+ config *NetworkConfiguration
+ bridge *bridgeInterface
+ steps []setupStep
+}
+
+func newBridgeSetup(c *NetworkConfiguration, i *bridgeInterface) *bridgeSetup {
+ return &bridgeSetup{config: c, bridge: i}
+}
+
+func (b *bridgeSetup) apply() error {
+ for _, fn := range b.steps {
+ if err := fn(b.config, b.bridge); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *bridgeSetup) queueStep(step setupStep) {
+ b.steps = append(b.steps, step)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device.go
new file mode 100644
index 0000000000..1e0e168af4
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device.go
@@ -0,0 +1,50 @@
+package bridge
+
+import (
+ log "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/parsers/kernel"
+ "github.com/docker/libnetwork/netutils"
+ "github.com/vishvananda/netlink"
+)
+
+// setupDevice creates a new bridge interface.
+func setupDevice(config *NetworkConfiguration, i *bridgeInterface) error {
+ // We only attempt to create the bridge when the requested device name is
+ // the default one.
+ if config.BridgeName != DefaultBridgeName && !config.AllowNonDefaultBridge {
+ return NonDefaultBridgeExistError(config.BridgeName)
+ }
+
+ // Set the bridgeInterface netlink.Bridge.
+ i.Link = &netlink.Bridge{
+ LinkAttrs: netlink.LinkAttrs{
+ Name: config.BridgeName,
+ },
+ }
+
+ // Only set the bridge's MAC address if the kernel version is > 3.3, as it
+ // was not supported before that.
+ kv, err := kernel.GetKernelVersion()
+ if err == nil && (kv.Kernel >= 3 && kv.Major >= 3) {
+ i.Link.Attrs().HardwareAddr = netutils.GenerateRandomMAC()
+ log.Debugf("Setting bridge mac address to %s", i.Link.Attrs().HardwareAddr)
+ }
+
+ // Call out to netlink to create the device.
+ return netlink.LinkAdd(i.Link)
+}
+
+// setupDeviceUp brings the given bridge interface up.
+func setupDeviceUp(config *NetworkConfiguration, i *bridgeInterface) error {
+ err := netlink.LinkSetUp(i.Link)
+ if err != nil {
+ return err
+ }
+
+ // Attempt to update the bridge interface to refresh the flags status,
+ // ignoring any failure to do so.
+ if lnk, err := netlink.LinkByName(config.BridgeName); err == nil {
+ i.Link = lnk
+ }
+ return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device_test.go
new file mode 100644
index 0000000000..499f46a6ce
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device_test.go
@@ -0,0 +1,75 @@
+package bridge
+
+import (
+ "bytes"
+ "net"
+ "testing"
+
+ "github.com/docker/libnetwork/netutils"
+ "github.com/vishvananda/netlink"
+)
+
+func TestSetupNewBridge(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ config := &NetworkConfiguration{BridgeName: DefaultBridgeName}
+ br := &bridgeInterface{}
+
+ if err := setupDevice(config, br); err != nil {
+ t.Fatalf("Bridge creation failed: %v", err)
+ }
+ if br.Link == nil {
+ t.Fatal("bridgeInterface link is nil (expected valid link)")
+ }
+ if _, err := netlink.LinkByName(DefaultBridgeName); err != nil {
+ t.Fatalf("Failed to retrieve bridge device: %v", err)
+ }
+ if br.Link.Attrs().Flags&net.FlagUp == net.FlagUp {
+ t.Fatalf("bridgeInterface should be created down")
+ }
+}
+
+func TestSetupNewNonDefaultBridge(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ config := &NetworkConfiguration{BridgeName: "test0"}
+ br := &bridgeInterface{}
+
+ err := setupDevice(config, br)
+ if err == nil {
+ t.Fatal("Expected bridge creation failure with \"non default name\", succeeded")
+ }
+
+ if _, ok := err.(NonDefaultBridgeExistError); !ok {
+ t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+ }
+}
+
+func TestSetupDeviceUp(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ config := &NetworkConfiguration{BridgeName: DefaultBridgeName}
+ br := &bridgeInterface{}
+
+ if err := setupDevice(config, br); err != nil {
+ t.Fatalf("Bridge creation failed: %v", err)
+ }
+ if err := setupDeviceUp(config, br); err != nil {
+ t.Fatalf("Failed to up bridge device: %v", err)
+ }
+
+ lnk, _ := netlink.LinkByName(DefaultBridgeName)
+ if lnk.Attrs().Flags&net.FlagUp != net.FlagUp {
+ t.Fatalf("bridgeInterface should be up")
+ }
+}
+
+func TestGenerateRandomMAC(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ mac1 := netutils.GenerateRandomMAC()
+ mac2 := netutils.GenerateRandomMAC()
+ if bytes.Compare(mac1, mac2) == 0 {
+ t.Fatalf("Generated twice the same MAC address %v", mac1)
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv4.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv4.go
new file mode 100644
index 0000000000..7657aa330c
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv4.go
@@ -0,0 +1,19 @@
+package bridge
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func setupFixedCIDRv4(config *NetworkConfiguration, i *bridgeInterface) error {
+ addrv4, _, err := i.addresses()
+ if err != nil {
+ return err
+ }
+
+ log.Debugf("Using IPv4 subnet: %v", config.FixedCIDR)
+ if err := ipAllocator.RegisterSubnet(addrv4.IPNet, config.FixedCIDR); err != nil {
+ return &FixedCIDRv4Error{Subnet: config.FixedCIDR, Net: addrv4.IPNet, Err: err}
+ }
+
+ return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv4_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv4_test.go
new file mode 100644
index 0000000000..5bb57d0c68
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv4_test.go
@@ -0,0 +1,62 @@
+package bridge
+
+import (
+ "net"
+ "testing"
+
+ "github.com/docker/libnetwork/netutils"
+)
+
+func TestSetupFixedCIDRv4(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ config := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName,
+ AddressIPv4: &net.IPNet{IP: net.ParseIP("192.168.1.1"), Mask: net.CIDRMask(16, 32)},
+ FixedCIDR: &net.IPNet{IP: net.ParseIP("192.168.2.0"), Mask: net.CIDRMask(24, 32)}}
+ br := &bridgeInterface{}
+
+ if err := setupDevice(config, br); err != nil {
+ t.Fatalf("Bridge creation failed: %v", err)
+ }
+ if err := setupBridgeIPv4(config, br); err != nil {
+ t.Fatalf("Assign IPv4 to bridge failed: %v", err)
+ }
+
+ if err := setupFixedCIDRv4(config, br); err != nil {
+ t.Fatalf("Failed to setup bridge FixedCIDRv4: %v", err)
+ }
+
+ if ip, err := ipAllocator.RequestIP(config.FixedCIDR, nil); err != nil {
+ t.Fatalf("Failed to request IP to allocator: %v", err)
+ } else if expected := "192.168.2.1"; ip.String() != expected {
+ t.Fatalf("Expected allocated IP %s, got %s", expected, ip)
+ }
+}
+
+func TestSetupBadFixedCIDRv4(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ config := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName,
+ AddressIPv4: &net.IPNet{IP: net.ParseIP("192.168.1.1"), Mask: net.CIDRMask(24, 32)},
+ FixedCIDR: &net.IPNet{IP: net.ParseIP("192.168.2.0"), Mask: net.CIDRMask(24, 32)}}
+ br := &bridgeInterface{}
+
+ if err := setupDevice(config, br); err != nil {
+ t.Fatalf("Bridge creation failed: %v", err)
+ }
+ if err := setupBridgeIPv4(config, br); err != nil {
+ t.Fatalf("Assign IPv4 to bridge failed: %v", err)
+ }
+
+ err := setupFixedCIDRv4(config, br)
+ if err == nil {
+ t.Fatal("Setup bridge FixedCIDRv4 should have failed")
+ }
+
+ if _, ok := err.(*FixedCIDRv4Error); !ok {
+ t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+ }
+
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv6.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv6.go
new file mode 100644
index 0000000000..ade465a1cb
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv6.go
@@ -0,0 +1,14 @@
+package bridge
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func setupFixedCIDRv6(config *NetworkConfiguration, i *bridgeInterface) error {
+ log.Debugf("Using IPv6 subnet: %v", config.FixedCIDRv6)
+ if err := ipAllocator.RegisterSubnet(config.FixedCIDRv6, config.FixedCIDRv6); err != nil {
+ return &FixedCIDRv6Error{Net: config.FixedCIDRv6, Err: err}
+ }
+
+ return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv6_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv6_test.go
new file mode 100644
index 0000000000..a5a2c291b5
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv6_test.go
@@ -0,0 +1,37 @@
+package bridge
+
+import (
+ "net"
+ "testing"
+
+ "github.com/docker/libnetwork/netutils"
+)
+
+func TestSetupFixedCIDRv6(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ config := &NetworkConfiguration{}
+ br := newInterface(config)
+
+ _, config.FixedCIDRv6, _ = net.ParseCIDR("2002:db8::/48")
+ if err := setupDevice(config, br); err != nil {
+ t.Fatalf("Bridge creation failed: %v", err)
+ }
+ if err := setupBridgeIPv4(config, br); err != nil {
+ t.Fatalf("Assign IPv4 to bridge failed: %v", err)
+ }
+
+ if err := setupBridgeIPv6(config, br); err != nil {
+ t.Fatalf("Assign IPv4 to bridge failed: %v", err)
+ }
+
+ if err := setupFixedCIDRv6(config, br); err != nil {
+ t.Fatalf("Failed to setup bridge FixedCIDRv6: %v", err)
+ }
+
+ if ip, err := ipAllocator.RequestIP(config.FixedCIDRv6, nil); err != nil {
+ t.Fatalf("Failed to request IP to allocator: %v", err)
+ } else if expected := "2002:db8::1"; ip.String() != expected {
+ t.Fatalf("Expected allocated IP %s, got %s", expected, ip)
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding.go
new file mode 100644
index 0000000000..1bc3416ca2
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding.go
@@ -0,0 +1,25 @@
+package bridge
+
+import (
+ "fmt"
+ "io/ioutil"
+)
+
+const (
+ ipv4ForwardConf = "/proc/sys/net/ipv4/ip_forward"
+ ipv4ForwardConfPerm = 0644
+)
+
+func setupIPForwarding(config *Configuration) error {
+ // Sanity Check
+ if config.EnableIPForwarding == false {
+ return &ErrIPFwdCfg{}
+ }
+
+ // Enable IPv4 forwarding
+ if err := ioutil.WriteFile(ipv4ForwardConf, []byte{'1', '\n'}, ipv4ForwardConfPerm); err != nil {
+ return fmt.Errorf("Setup IP forwarding failed: %v", err)
+ }
+
+ return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding_test.go
new file mode 100644
index 0000000000..7c4cfea279
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding_test.go
@@ -0,0 +1,75 @@
+package bridge
+
+import (
+ "bytes"
+ "io/ioutil"
+ "testing"
+)
+
+func TestSetupIPForwarding(t *testing.T) {
+ // Read current setting and ensure the original value gets restored
+ procSetting := readCurrentIPForwardingSetting(t)
+ defer reconcileIPForwardingSetting(t, procSetting)
+
+ // Disable IP Forwarding if enabled
+ if bytes.Compare(procSetting, []byte("1\n")) == 0 {
+ writeIPForwardingSetting(t, []byte{'0', '\n'})
+ }
+
+ // Create test interface with ip forwarding setting enabled
+ config := &Configuration{
+ EnableIPForwarding: true}
+
+ // Set IP Forwarding
+ if err := setupIPForwarding(config); err != nil {
+ t.Fatalf("Failed to setup IP forwarding: %v", err)
+ }
+
+ // Read new setting
+ procSetting = readCurrentIPForwardingSetting(t)
+ if bytes.Compare(procSetting, []byte("1\n")) != 0 {
+ t.Fatalf("Failed to effectively setup IP forwarding")
+ }
+}
+
+func TestUnexpectedSetupIPForwarding(t *testing.T) {
+ // Read current setting and ensure the original value gets restored
+ procSetting := readCurrentIPForwardingSetting(t)
+ defer reconcileIPForwardingSetting(t, procSetting)
+
+ // Create test interface without ip forwarding setting enabled
+ config := &Configuration{
+ EnableIPForwarding: false}
+
+ // Attempt Set IP Forwarding
+ err := setupIPForwarding(config)
+ if err == nil {
+ t.Fatal("Setup IP forwarding was expected to fail")
+ }
+
+ if _, ok := err.(*ErrIPFwdCfg); !ok {
+ t.Fatalf("Setup IP forwarding failed with unexpected error: %v", err)
+ }
+}
+
+func readCurrentIPForwardingSetting(t *testing.T) []byte {
+ procSetting, err := ioutil.ReadFile(ipv4ForwardConf)
+ if err != nil {
+ t.Fatalf("Can't execute test: Failed to read current IP forwarding setting: %v", err)
+ }
+ return procSetting
+}
+
+func writeIPForwardingSetting(t *testing.T, chars []byte) {
+ err := ioutil.WriteFile(ipv4ForwardConf, chars, ipv4ForwardConfPerm)
+ if err != nil {
+ t.Fatalf("Can't execute or cleanup after test: Failed to reset IP forwarding: %v", err)
+ }
+}
+
+func reconcileIPForwardingSetting(t *testing.T, original []byte) {
+ current := readCurrentIPForwardingSetting(t)
+ if bytes.Compare(original, current) != 0 {
+ writeIPForwardingSetting(t, original)
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
new file mode 100644
index 0000000000..3d4619769a
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
@@ -0,0 +1,173 @@
+package bridge
+
+import (
+ "fmt"
+ "net"
+
+ "github.com/docker/libnetwork/iptables"
+ "github.com/docker/libnetwork/netutils"
+)
+
+// DockerChain: DOCKER iptables chain name
+const (
+ DockerChain = "DOCKER"
+)
+
+func setupIPTables(config *NetworkConfiguration, i *bridgeInterface) error {
+ // Sanity check.
+ if config.EnableIPTables == false {
+ return IPTableCfgError(config.BridgeName)
+ }
+
+ hairpinMode := !config.EnableUserlandProxy
+
+ addrv4, _, err := netutils.GetIfaceAddr(config.BridgeName)
+ if err != nil {
+ return fmt.Errorf("Failed to setup IP tables, cannot acquire Interface address: %s", err.Error())
+ }
+ if err = setupIPTablesInternal(config.BridgeName, addrv4, config.EnableICC, config.EnableIPMasquerade, hairpinMode, true); err != nil {
+ return fmt.Errorf("Failed to Setup IP tables: %s", err.Error())
+ }
+
+ _, err = iptables.NewChain(DockerChain, config.BridgeName, iptables.Nat, hairpinMode)
+ if err != nil {
+ return fmt.Errorf("Failed to create NAT chain: %s", err.Error())
+ }
+
+ chain, err := iptables.NewChain(DockerChain, config.BridgeName, iptables.Filter, hairpinMode)
+ if err != nil {
+ return fmt.Errorf("Failed to create FILTER chain: %s", err.Error())
+ }
+
+ portMapper.SetIptablesChain(chain)
+
+ return nil
+}
+
+type iptRule struct {
+ table iptables.Table
+ chain string
+ preArgs []string
+ args []string
+}
+
+func setupIPTablesInternal(bridgeIface string, addr net.Addr, icc, ipmasq, hairpin, enable bool) error {
+
+ var (
+ address = addr.String()
+ natRule = iptRule{table: iptables.Nat, chain: "POSTROUTING", preArgs: []string{"-t", "nat"}, args: []string{"-s", address, "!", "-o", bridgeIface, "-j", "MASQUERADE"}}
+ hpNatRule = iptRule{table: iptables.Nat, chain: "POSTROUTING", preArgs: []string{"-t", "nat"}, args: []string{"-m", "addrtype", "--src-type", "LOCAL", "-o", bridgeIface, "-j", "MASQUERADE"}}
+ outRule = iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"}}
+ inRule = iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"}}
+ )
+
+ // Set NAT.
+ if ipmasq {
+ if err := programChainRule(natRule, "NAT", enable); err != nil {
+ return err
+ }
+ }
+
+ // In hairpin mode, masquerade traffic from localhost
+ if hairpin {
+ if err := programChainRule(hpNatRule, "MASQ LOCAL HOST", enable); err != nil {
+ return err
+ }
+ }
+
+ // Set Inter Container Communication.
+ if err := setIcc(bridgeIface, icc, enable); err != nil {
+ return err
+ }
+
+ // Set Accept on all non-intercontainer outgoing packets.
+ if err := programChainRule(outRule, "ACCEPT NON_ICC OUTGOING", enable); err != nil {
+ return err
+ }
+
+ // Set Accept on incoming packets for existing connections.
+ if err := programChainRule(inRule, "ACCEPT INCOMING", enable); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func programChainRule(rule iptRule, ruleDescr string, insert bool) error {
+ var (
+ prefix []string
+ operation string
+ condition bool
+ doesExist = iptables.Exists(rule.table, rule.chain, rule.args...)
+ )
+
+ if insert {
+ condition = !doesExist
+ prefix = []string{"-I", rule.chain}
+ operation = "enable"
+ } else {
+ condition = doesExist
+ prefix = []string{"-D", rule.chain}
+ operation = "disable"
+ }
+ if rule.preArgs != nil {
+ prefix = append(rule.preArgs, prefix...)
+ }
+
+ if condition {
+ if output, err := iptables.Raw(append(prefix, rule.args...)...); err != nil {
+ return fmt.Errorf("Unable to %s %s rule: %s", operation, ruleDescr, err.Error())
+ } else if len(output) != 0 {
+ return &iptables.ChainError{Chain: rule.chain, Output: output}
+ }
+ }
+
+ return nil
+}
+
+func setIcc(bridgeIface string, iccEnable, insert bool) error {
+ var (
+ table = iptables.Filter
+ chain = "FORWARD"
+ args = []string{"-i", bridgeIface, "-o", bridgeIface, "-j"}
+ acceptArgs = append(args, "ACCEPT")
+ dropArgs = append(args, "DROP")
+ )
+
+ if insert {
+ if !iccEnable {
+ iptables.Raw(append([]string{"-D", chain}, acceptArgs...)...)
+
+ if !iptables.Exists(table, chain, dropArgs...) {
+ if output, err := iptables.Raw(append([]string{"-A", chain}, dropArgs...)...); err != nil {
+ return fmt.Errorf("Unable to prevent intercontainer communication: %s", err.Error())
+ } else if len(output) != 0 {
+ return fmt.Errorf("Error disabling intercontainer communication: %s", output)
+ }
+ }
+ } else {
+ iptables.Raw(append([]string{"-D", chain}, dropArgs...)...)
+
+ if !iptables.Exists(table, chain, acceptArgs...) {
+ if output, err := iptables.Raw(append([]string{"-A", chain}, acceptArgs...)...); err != nil {
+ return fmt.Errorf("Unable to allow intercontainer communication: %s", err.Error())
+ } else if len(output) != 0 {
+ return fmt.Errorf("Error enabling intercontainer communication: %s", output)
+ }
+ }
+ }
+ } else {
+ // Remove any ICC rule.
+ if !iccEnable {
+ if iptables.Exists(table, chain, dropArgs...) {
+ iptables.Raw(append([]string{"-D", chain}, dropArgs...)...)
+ }
+ } else {
+ if iptables.Exists(table, chain, acceptArgs...) {
+ iptables.Raw(append([]string{"-D", chain}, acceptArgs...)...)
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables_test.go
new file mode 100644
index 0000000000..1c73ba9d7b
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables_test.go
@@ -0,0 +1,103 @@
+package bridge
+
+import (
+ "net"
+ "testing"
+
+ "github.com/docker/libnetwork/iptables"
+ "github.com/docker/libnetwork/netutils"
+)
+
+const (
+ iptablesTestBridgeIP = "192.168.42.1"
+)
+
+func TestProgramIPTable(t *testing.T) {
+ // Create a test bridge with a basic bridge configuration (name + IPv4).
+ defer netutils.SetupTestNetNS(t)()
+ createTestBridge(getBasicTestConfig(), &bridgeInterface{}, t)
+
+ // Store various iptables chain rules we care for.
+ rules := []struct {
+ rule iptRule
+ descr string
+ }{
+ {iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-d", "127.1.2.3", "-i", "lo", "-o", "lo", "-j", "DROP"}}, "Test Loopback"},
+ {iptRule{table: iptables.Nat, chain: "POSTROUTING", preArgs: []string{"-t", "nat"}, args: []string{"-s", iptablesTestBridgeIP, "!", "-o", DefaultBridgeName, "-j", "MASQUERADE"}}, "NAT Test"},
+ {iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-i", DefaultBridgeName, "!", "-o", DefaultBridgeName, "-j", "ACCEPT"}}, "Test ACCEPT NON_ICC OUTGOING"},
+ {iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-o", DefaultBridgeName, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"}}, "Test ACCEPT INCOMING"},
+ {iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-i", DefaultBridgeName, "-o", DefaultBridgeName, "-j", "ACCEPT"}}, "Test enable ICC"},
+ {iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-i", DefaultBridgeName, "-o", DefaultBridgeName, "-j", "DROP"}}, "Test disable ICC"},
+ }
+
+ // Assert the chain rules' insertion and removal.
+ for _, c := range rules {
+ assertIPTableChainProgramming(c.rule, c.descr, t)
+ }
+}
+
+func TestSetupIPTables(t *testing.T) {
+ // Create a test bridge with a basic bridge configuration (name + IPv4).
+ defer netutils.SetupTestNetNS(t)()
+ config := getBasicTestConfig()
+ br := &bridgeInterface{}
+
+ createTestBridge(config, br, t)
+
+ // Modify iptables params in base configuration and apply them.
+ config.EnableIPTables = true
+ assertBridgeConfig(config, br, t)
+
+ config.EnableIPMasquerade = true
+ assertBridgeConfig(config, br, t)
+
+ config.EnableICC = true
+ assertBridgeConfig(config, br, t)
+
+ config.EnableIPMasquerade = false
+ assertBridgeConfig(config, br, t)
+}
+
+func getBasicTestConfig() *NetworkConfiguration {
+ config := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName,
+ AddressIPv4: &net.IPNet{IP: net.ParseIP(iptablesTestBridgeIP), Mask: net.CIDRMask(16, 32)}}
+ return config
+}
+
+func createTestBridge(config *NetworkConfiguration, br *bridgeInterface, t *testing.T) {
+ if err := setupDevice(config, br); err != nil {
+ t.Fatalf("Failed to create the testing Bridge: %s", err.Error())
+ }
+ if err := setupBridgeIPv4(config, br); err != nil {
+ t.Fatalf("Failed to bring up the testing Bridge: %s", err.Error())
+ }
+}
+
+// Assert base function which pushes iptables chain rules on insertion and removal.
+func assertIPTableChainProgramming(rule iptRule, descr string, t *testing.T) {
+ // Add
+ if err := programChainRule(rule, descr, true); err != nil {
+ t.Fatalf("Failed to program iptable rule %s: %s", descr, err.Error())
+ }
+ if iptables.Exists(rule.table, rule.chain, rule.args...) == false {
+ t.Fatalf("Failed to effectively program iptable rule: %s", descr)
+ }
+
+ // Remove
+ if err := programChainRule(rule, descr, false); err != nil {
+ t.Fatalf("Failed to remove iptable rule %s: %s", descr, err.Error())
+ }
+ if iptables.Exists(rule.table, rule.chain, rule.args...) == true {
+ t.Fatalf("Failed to effectively remove iptable rule: %s", descr)
+ }
+}
+
+// Assert function which pushes chains based on bridge config parameters.
+func assertBridgeConfig(config *NetworkConfiguration, br *bridgeInterface, t *testing.T) {
+ // Attempt programming of ip tables.
+ err := setupIPTables(config, br)
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go
new file mode 100644
index 0000000000..a0059c8543
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go
@@ -0,0 +1,136 @@
+package bridge
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+
+ "path/filepath"
+
+ log "github.com/Sirupsen/logrus"
+ "github.com/docker/libnetwork/netutils"
+ "github.com/vishvananda/netlink"
+)
+
+var bridgeNetworks []*net.IPNet
+
+func init() {
+ // Here we don't follow the convention of using the 1st IP of the range for the gateway.
+ // This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges.
+ // In theory this shouldn't matter - in practice there's bound to be a few scripts relying
+ // on the internal addressing or other stupid things like that.
+ // They shouldn't, but hey, let's not break them unless we really have to.
+ for _, addr := range []string{
+ "172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23
+ "10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive
+ "10.1.42.1/16",
+ "10.42.42.1/16",
+ "172.16.42.1/24",
+ "172.16.43.1/24",
+ "172.16.44.1/24",
+ "10.0.42.1/24",
+ "10.0.43.1/24",
+ "192.168.42.1/24",
+ "192.168.43.1/24",
+ "192.168.44.1/24",
+ } {
+ ip, net, err := net.ParseCIDR(addr)
+ if err != nil {
+ log.Errorf("Failed to parse address %s", addr)
+ continue
+ }
+ net.IP = ip.To4()
+ bridgeNetworks = append(bridgeNetworks, net)
+ }
+}
+
+func setupBridgeIPv4(config *NetworkConfiguration, i *bridgeInterface) error {
+ addrv4, _, err := i.addresses()
+ if err != nil {
+ return err
+ }
+
+ // Check if we have an IP address already on the bridge.
+ if addrv4.IPNet != nil {
+ // Make sure to store bridge network and default gateway before getting out.
+ i.bridgeIPv4 = addrv4.IPNet
+ i.gatewayIPv4 = addrv4.IPNet.IP
+ return nil
+ }
+
+ // Do not try to configure IPv4 on a non-default bridge unless you are
+ // specifically asked to do so.
+ if config.BridgeName != DefaultBridgeName && !config.AllowNonDefaultBridge {
+ return NonDefaultBridgeExistError(config.BridgeName)
+ }
+
+ bridgeIPv4, err := electBridgeIPv4(config)
+ if err != nil {
+ return err
+ }
+
+ log.Debugf("Creating bridge interface %q with network %s", config.BridgeName, bridgeIPv4)
+ if err := netlink.AddrAdd(i.Link, &netlink.Addr{IPNet: bridgeIPv4}); err != nil {
+ return &IPv4AddrAddError{IP: bridgeIPv4, Err: err}
+ }
+
+ // Store bridge network and default gateway
+ i.bridgeIPv4 = bridgeIPv4
+ i.gatewayIPv4 = i.bridgeIPv4.IP
+
+ return nil
+}
+
+func allocateBridgeIP(config *NetworkConfiguration, i *bridgeInterface) error {
+ ipAllocator.RequestIP(i.bridgeIPv4, i.bridgeIPv4.IP)
+ return nil
+}
+
+func electBridgeIPv4(config *NetworkConfiguration) (*net.IPNet, error) {
+ // Use the requested IPv4 CIDR when available.
+ if config.AddressIPv4 != nil {
+ return config.AddressIPv4, nil
+ }
+
+ // We don't check for an error here, because we don't really care if we
+ // can't read /etc/resolv.conf. So instead we skip the append if resolvConf
+ // is nil. It either doesn't exist, or we can't read it for some reason.
+ nameservers := []string{}
+ if resolvConf, _ := readResolvConf(); resolvConf != nil {
+ nameservers = append(nameservers, getNameserversAsCIDR(resolvConf)...)
+ }
+
+ // Try to automatically elect appropriate bridge IPv4 settings.
+ for _, n := range bridgeNetworks {
+ if err := netutils.CheckNameserverOverlaps(nameservers, n); err == nil {
+ if err := netutils.CheckRouteOverlaps(n); err == nil {
+ return n, nil
+ }
+ }
+ }
+
+ return nil, IPv4AddrRangeError(config.BridgeName)
+}
+
+func setupGatewayIPv4(config *NetworkConfiguration, i *bridgeInterface) error {
+ if !i.bridgeIPv4.Contains(config.DefaultGatewayIPv4) {
+ return &ErrInvalidGateway{}
+ }
+ if _, err := ipAllocator.RequestIP(i.bridgeIPv4, config.DefaultGatewayIPv4); err != nil {
+ return err
+ }
+
+ // Store requested default gateway
+ i.gatewayIPv4 = config.DefaultGatewayIPv4
+
+ return nil
+}
+
+func setupLoopbackAdressesRouting(config *NetworkConfiguration, i *bridgeInterface) error {
+	// Enable loopback addresses routing
+ sysPath := filepath.Join("/proc/sys/net/ipv4/conf", config.BridgeName, "route_localnet")
+ if err := ioutil.WriteFile(sysPath, []byte{'1', '\n'}, 0644); err != nil {
+ return fmt.Errorf("Unable to enable local routing for hairpin mode: %v", err)
+ }
+ return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4_test.go
new file mode 100644
index 0000000000..e311d641dc
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4_test.go
@@ -0,0 +1,100 @@
+package bridge
+
+import (
+ "net"
+ "testing"
+
+ "github.com/docker/libnetwork/netutils"
+ "github.com/vishvananda/netlink"
+)
+
+func setupTestInterface(t *testing.T) (*NetworkConfiguration, *bridgeInterface) {
+ config := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName}
+ br := &bridgeInterface{}
+
+ if err := setupDevice(config, br); err != nil {
+ t.Fatalf("Bridge creation failed: %v", err)
+ }
+ return config, br
+}
+
+func TestSetupBridgeIPv4Fixed(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ ip, netw, err := net.ParseCIDR("192.168.1.1/24")
+ if err != nil {
+ t.Fatalf("Failed to parse bridge IPv4: %v", err)
+ }
+
+ config, br := setupTestInterface(t)
+ config.AddressIPv4 = &net.IPNet{IP: ip, Mask: netw.Mask}
+ if err := setupBridgeIPv4(config, br); err != nil {
+ t.Fatalf("Failed to setup bridge IPv4: %v", err)
+ }
+
+ addrsv4, err := netlink.AddrList(br.Link, netlink.FAMILY_V4)
+ if err != nil {
+ t.Fatalf("Failed to list device IPv4 addresses: %v", err)
+ }
+
+ var found bool
+ for _, addr := range addrsv4 {
+ if config.AddressIPv4.String() == addr.IPNet.String() {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ t.Fatalf("Bridge device does not have requested IPv4 address %v", config.AddressIPv4)
+ }
+}
+
+func TestSetupBridgeIPv4Auto(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ config, br := setupTestInterface(t)
+ if err := setupBridgeIPv4(config, br); err != nil {
+ t.Fatalf("Failed to setup bridge IPv4: %v", err)
+ }
+
+ addrsv4, err := netlink.AddrList(br.Link, netlink.FAMILY_V4)
+ if err != nil {
+ t.Fatalf("Failed to list device IPv4 addresses: %v", err)
+ }
+
+ var found bool
+ for _, addr := range addrsv4 {
+ if bridgeNetworks[0].String() == addr.IPNet.String() {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ t.Fatalf("Bridge device does not have the automatic IPv4 address %v", bridgeNetworks[0].String())
+ }
+}
+
+func TestSetupGatewayIPv4(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ ip, nw, _ := net.ParseCIDR("192.168.0.24/16")
+ nw.IP = ip
+ gw := net.ParseIP("192.168.0.254")
+
+ config := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName,
+ DefaultGatewayIPv4: gw}
+
+ br := &bridgeInterface{bridgeIPv4: nw}
+
+ if err := setupGatewayIPv4(config, br); err != nil {
+ t.Fatalf("Set Default Gateway failed: %v", err)
+ }
+
+ if !gw.Equal(br.gatewayIPv4) {
+ t.Fatalf("Set Default Gateway failed. Expected %v, Found %v", gw, br.gatewayIPv4)
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go
new file mode 100644
index 0000000000..264e5b2a23
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go
@@ -0,0 +1,66 @@
+package bridge
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+
+ "github.com/vishvananda/netlink"
+)
+
+var bridgeIPv6 *net.IPNet
+
+const bridgeIPv6Str = "fe80::1/64"
+
+func init() {
+ // We allow ourselves to panic in this special case because we indicate a
+ // failure to parse a compile-time define constant.
+ if ip, netw, err := net.ParseCIDR(bridgeIPv6Str); err == nil {
+ bridgeIPv6 = &net.IPNet{IP: ip, Mask: netw.Mask}
+ } else {
+ panic(fmt.Sprintf("Cannot parse default bridge IPv6 address %q: %v", bridgeIPv6Str, err))
+ }
+}
+
+func setupBridgeIPv6(config *NetworkConfiguration, i *bridgeInterface) error {
+ // Enable IPv6 on the bridge
+ procFile := "/proc/sys/net/ipv6/conf/" + config.BridgeName + "/disable_ipv6"
+ if err := ioutil.WriteFile(procFile, []byte{'0', '\n'}, 0644); err != nil {
+ return fmt.Errorf("Unable to enable IPv6 addresses on bridge: %v", err)
+ }
+
+ _, addrsv6, err := i.addresses()
+ if err != nil {
+ return err
+ }
+
+ // Add the default link local ipv6 address if it doesn't exist
+ if !findIPv6Address(netlink.Addr{IPNet: bridgeIPv6}, addrsv6) {
+ if err := netlink.AddrAdd(i.Link, &netlink.Addr{IPNet: bridgeIPv6}); err != nil {
+ return &IPv6AddrAddError{IP: bridgeIPv6, Err: err}
+ }
+ }
+
+ // Store bridge network and default gateway
+ i.bridgeIPv6 = bridgeIPv6
+ i.gatewayIPv6 = i.bridgeIPv6.IP
+
+ return nil
+}
+
+func setupGatewayIPv6(config *NetworkConfiguration, i *bridgeInterface) error {
+ if config.FixedCIDRv6 == nil {
+ return &ErrInvalidContainerSubnet{}
+ }
+ if !config.FixedCIDRv6.Contains(config.DefaultGatewayIPv6) {
+ return &ErrInvalidGateway{}
+ }
+ if _, err := ipAllocator.RequestIP(config.FixedCIDRv6, config.DefaultGatewayIPv6); err != nil {
+ return err
+ }
+
+ // Store requested default gateway
+ i.gatewayIPv6 = config.DefaultGatewayIPv6
+
+ return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6_test.go
new file mode 100644
index 0000000000..cb8c17fb85
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6_test.go
@@ -0,0 +1,70 @@
+package bridge
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "testing"
+
+ "github.com/docker/libnetwork/netutils"
+ "github.com/vishvananda/netlink"
+)
+
+func TestSetupIPv6(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ config, br := setupTestInterface(t)
+ if err := setupBridgeIPv6(config, br); err != nil {
+ t.Fatalf("Failed to setup bridge IPv6: %v", err)
+ }
+
+ procSetting, err := ioutil.ReadFile(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/disable_ipv6", config.BridgeName))
+ if err != nil {
+ t.Fatalf("Failed to read disable_ipv6 kernel setting: %v", err)
+ }
+
+ if expected := []byte("0\n"); bytes.Compare(expected, procSetting) != 0 {
+ t.Fatalf("Invalid kernel setting disable_ipv6: expected %q, got %q", string(expected), string(procSetting))
+ }
+
+ addrsv6, err := netlink.AddrList(br.Link, netlink.FAMILY_V6)
+ if err != nil {
+ t.Fatalf("Failed to list device IPv6 addresses: %v", err)
+ }
+
+ var found bool
+ for _, addr := range addrsv6 {
+ if bridgeIPv6Str == addr.IPNet.String() {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ t.Fatalf("Bridge device does not have requested IPv6 address %v", bridgeIPv6Str)
+ }
+
+}
+
+func TestSetupGatewayIPv6(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ _, nw, _ := net.ParseCIDR("2001:db8:ea9:9abc:ffff::/80")
+ gw := net.ParseIP("2001:db8:ea9:9abc:ffff::254")
+
+ config := &NetworkConfiguration{
+ BridgeName: DefaultBridgeName,
+ FixedCIDRv6: nw,
+ DefaultGatewayIPv6: gw}
+
+ br := &bridgeInterface{}
+
+ if err := setupGatewayIPv6(config, br); err != nil {
+ t.Fatalf("Set Default Gateway failed: %v", err)
+ }
+
+ if !gw.Equal(br.gatewayIPv6) {
+ t.Fatalf("Set Default Gateway failed. Expected %v, Found %v", gw, br.gatewayIPv6)
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify.go
new file mode 100644
index 0000000000..46d025d1b8
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify.go
@@ -0,0 +1,46 @@
+package bridge
+
+import (
+ "github.com/vishvananda/netlink"
+)
+
+func setupVerifyAndReconcile(config *NetworkConfiguration, i *bridgeInterface) error {
+ // Fetch a single IPv4 and a slice of IPv6 addresses from the bridge.
+ addrv4, addrsv6, err := i.addresses()
+ if err != nil {
+ return err
+ }
+
+ // Verify that the bridge does have an IPv4 address.
+ if addrv4.IPNet == nil {
+ return &ErrNoIPAddr{}
+ }
+
+ // Verify that the bridge IPv4 address matches the requested configuration.
+ if config.AddressIPv4 != nil && !addrv4.IP.Equal(config.AddressIPv4.IP) {
+ return &IPv4AddrNoMatchError{IP: addrv4.IP, CfgIP: config.AddressIPv4.IP}
+ }
+
+ // Verify that one of the bridge IPv6 addresses matches the requested
+ // configuration.
+ if config.EnableIPv6 && !findIPv6Address(netlink.Addr{IPNet: bridgeIPv6}, addrsv6) {
+ return (*IPv6AddrNoMatchError)(bridgeIPv6)
+ }
+
+ // By this time we have either configured a new bridge with an IP address
+ // or made sure an existing bridge's IP matches the configuration
+ // Now is the time to cache these states in the bridgeInterface.
+ i.bridgeIPv4 = addrv4.IPNet
+ i.bridgeIPv6 = bridgeIPv6
+
+ return nil
+}
+
+func findIPv6Address(addr netlink.Addr, addresses []netlink.Addr) bool {
+ for _, addrv6 := range addresses {
+ if addrv6.String() == addr.String() {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify_test.go
new file mode 100644
index 0000000000..d3c79dd504
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify_test.go
@@ -0,0 +1,110 @@
+package bridge
+
+import (
+ "net"
+ "testing"
+
+ "github.com/docker/libnetwork/netutils"
+ "github.com/vishvananda/netlink"
+)
+
+func setupVerifyTest(t *testing.T) *bridgeInterface {
+ inf := &bridgeInterface{}
+
+ br := netlink.Bridge{}
+ br.LinkAttrs.Name = "default0"
+ if err := netlink.LinkAdd(&br); err == nil {
+ inf.Link = &br
+ } else {
+ t.Fatalf("Failed to create bridge interface: %v", err)
+ }
+
+ return inf
+}
+
+func TestSetupVerify(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ addrv4 := net.IPv4(192, 168, 1, 1)
+ inf := setupVerifyTest(t)
+ config := &NetworkConfiguration{}
+ config.AddressIPv4 = &net.IPNet{IP: addrv4, Mask: addrv4.DefaultMask()}
+
+ if err := netlink.AddrAdd(inf.Link, &netlink.Addr{IPNet: config.AddressIPv4}); err != nil {
+ t.Fatalf("Failed to assign IPv4 %s to interface: %v", config.AddressIPv4, err)
+ }
+
+ if err := setupVerifyAndReconcile(config, inf); err != nil {
+ t.Fatalf("Address verification failed: %v", err)
+ }
+}
+
+func TestSetupVerifyBad(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ addrv4 := net.IPv4(192, 168, 1, 1)
+ inf := setupVerifyTest(t)
+ config := &NetworkConfiguration{}
+ config.AddressIPv4 = &net.IPNet{IP: addrv4, Mask: addrv4.DefaultMask()}
+
+ ipnet := &net.IPNet{IP: net.IPv4(192, 168, 1, 2), Mask: addrv4.DefaultMask()}
+ if err := netlink.AddrAdd(inf.Link, &netlink.Addr{IPNet: ipnet}); err != nil {
+ t.Fatalf("Failed to assign IPv4 %s to interface: %v", ipnet, err)
+ }
+
+ if err := setupVerifyAndReconcile(config, inf); err == nil {
+ t.Fatal("Address verification was expected to fail")
+ }
+}
+
+func TestSetupVerifyMissing(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ addrv4 := net.IPv4(192, 168, 1, 1)
+ inf := setupVerifyTest(t)
+ config := &NetworkConfiguration{}
+ config.AddressIPv4 = &net.IPNet{IP: addrv4, Mask: addrv4.DefaultMask()}
+
+ if err := setupVerifyAndReconcile(config, inf); err == nil {
+ t.Fatal("Address verification was expected to fail")
+ }
+}
+
+func TestSetupVerifyIPv6(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ addrv4 := net.IPv4(192, 168, 1, 1)
+ inf := setupVerifyTest(t)
+ config := &NetworkConfiguration{}
+ config.AddressIPv4 = &net.IPNet{IP: addrv4, Mask: addrv4.DefaultMask()}
+ config.EnableIPv6 = true
+
+ if err := netlink.AddrAdd(inf.Link, &netlink.Addr{IPNet: bridgeIPv6}); err != nil {
+ t.Fatalf("Failed to assign IPv6 %s to interface: %v", bridgeIPv6, err)
+ }
+ if err := netlink.AddrAdd(inf.Link, &netlink.Addr{IPNet: config.AddressIPv4}); err != nil {
+ t.Fatalf("Failed to assign IPv4 %s to interface: %v", config.AddressIPv4, err)
+ }
+
+ if err := setupVerifyAndReconcile(config, inf); err != nil {
+ t.Fatalf("Address verification failed: %v", err)
+ }
+}
+
+func TestSetupVerifyIPv6Missing(t *testing.T) {
+ defer netutils.SetupTestNetNS(t)()
+
+ addrv4 := net.IPv4(192, 168, 1, 1)
+ inf := setupVerifyTest(t)
+ config := &NetworkConfiguration{}
+ config.AddressIPv4 = &net.IPNet{IP: addrv4, Mask: addrv4.DefaultMask()}
+ config.EnableIPv6 = true
+
+ if err := netlink.AddrAdd(inf.Link, &netlink.Addr{IPNet: config.AddressIPv4}); err != nil {
+ t.Fatalf("Failed to assign IPv4 %s to interface: %v", config.AddressIPv4, err)
+ }
+
+ if err := setupVerifyAndReconcile(config, inf); err == nil {
+ t.Fatal("Address verification was expected to fail")
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/host/host.go b/vendor/src/github.com/docker/libnetwork/drivers/host/host.go
new file mode 100644
index 0000000000..50cdad7ad5
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/host/host.go
@@ -0,0 +1,53 @@
+package host
+
+import (
+ "github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/types"
+)
+
+const networkType = "host"
+
+type driver struct{}
+
+// Init registers a new instance of host driver
+func Init(dc driverapi.DriverCallback) error {
+ return dc.RegisterDriver(networkType, &driver{})
+}
+
+func (d *driver) Config(option map[string]interface{}) error {
+ return nil
+}
+
+func (d *driver) CreateNetwork(id types.UUID, option map[string]interface{}) error {
+ return nil
+}
+
+func (d *driver) DeleteNetwork(nid types.UUID) error {
+ return nil
+}
+
+func (d *driver) CreateEndpoint(nid, eid types.UUID, epInfo driverapi.EndpointInfo, epOptions map[string]interface{}) error {
+ return nil
+}
+
+func (d *driver) DeleteEndpoint(nid, eid types.UUID) error {
+ return nil
+}
+
+func (d *driver) EndpointOperInfo(nid, eid types.UUID) (map[string]interface{}, error) {
+ return make(map[string]interface{}, 0), nil
+}
+
+// Join method is invoked when a Sandbox is attached to an endpoint.
+func (d *driver) Join(nid, eid types.UUID, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
+ return (jinfo.SetHostsPath("/etc/hosts"))
+}
+
+// Leave method is invoked when a Sandbox detaches from an endpoint.
+func (d *driver) Leave(nid, eid types.UUID) error {
+ return nil
+}
+
+func (d *driver) Type() string {
+ return networkType
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/null/null.go b/vendor/src/github.com/docker/libnetwork/drivers/null/null.go
new file mode 100644
index 0000000000..11ac469a09
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/null/null.go
@@ -0,0 +1,53 @@
+package null
+
+import (
+ "github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/types"
+)
+
+const networkType = "null"
+
+type driver struct{}
+
+// Init registers a new instance of null driver
+func Init(dc driverapi.DriverCallback) error {
+ return dc.RegisterDriver(networkType, &driver{})
+}
+
+func (d *driver) Config(option map[string]interface{}) error {
+ return nil
+}
+
+func (d *driver) CreateNetwork(id types.UUID, option map[string]interface{}) error {
+ return nil
+}
+
+func (d *driver) DeleteNetwork(nid types.UUID) error {
+ return nil
+}
+
+func (d *driver) CreateEndpoint(nid, eid types.UUID, epInfo driverapi.EndpointInfo, epOptions map[string]interface{}) error {
+ return nil
+}
+
+func (d *driver) DeleteEndpoint(nid, eid types.UUID) error {
+ return nil
+}
+
+func (d *driver) EndpointOperInfo(nid, eid types.UUID) (map[string]interface{}, error) {
+ return make(map[string]interface{}, 0), nil
+}
+
+// Join method is invoked when a Sandbox is attached to an endpoint.
+func (d *driver) Join(nid, eid types.UUID, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
+ return nil
+}
+
+// Leave method is invoked when a Sandbox detaches from an endpoint.
+func (d *driver) Leave(nid, eid types.UUID) error {
+ return nil
+}
+
+func (d *driver) Type() string {
+ return networkType
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/remote/driver.go b/vendor/src/github.com/docker/libnetwork/drivers/remote/driver.go
new file mode 100644
index 0000000000..ffeb720ca7
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/remote/driver.go
@@ -0,0 +1,213 @@
+package remote
+
+import (
+ "fmt"
+ "net"
+
+ log "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/plugins"
+ "github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/types"
+)
+
+type driver struct {
+ endpoint *plugins.Client
+ networkType string
+}
+
+func newDriver(name string, client *plugins.Client) driverapi.Driver {
+ return &driver{networkType: name, endpoint: client}
+}
+
+// Init makes sure a remote driver is registered when a network driver
+// plugin is activated.
+func Init(dc driverapi.DriverCallback) error {
+ plugins.Handle(driverapi.NetworkPluginEndpointType, func(name string, client *plugins.Client) {
+ if err := dc.RegisterDriver(name, newDriver(name, client)); err != nil {
+ log.Errorf("error registering driver for %s due to %v", name, err)
+ }
+ })
+ return nil
+}
+
+// Config is not implemented for remote drivers, since it is assumed
+// to be supplied to the remote process out-of-band (e.g., as command
+// line arguments).
+func (d *driver) Config(option map[string]interface{}) error {
+ return &driverapi.ErrNotImplemented{}
+}
+
+func (d *driver) call(methodName string, arg interface{}, retVal maybeError) error {
+ method := driverapi.NetworkPluginEndpointType + "." + methodName
+ err := d.endpoint.Call(method, arg, retVal)
+ if err != nil {
+ return err
+ }
+ if e := retVal.getError(); e != "" {
+ return fmt.Errorf("remote: %s", e)
+ }
+ return nil
+}
+
+func (d *driver) CreateNetwork(id types.UUID, options map[string]interface{}) error {
+ create := &createNetworkRequest{
+ NetworkID: string(id),
+ Options: options,
+ }
+ return d.call("CreateNetwork", create, &createNetworkResponse{})
+}
+
+func (d *driver) DeleteNetwork(nid types.UUID) error {
+ delete := &deleteNetworkRequest{NetworkID: string(nid)}
+ return d.call("DeleteNetwork", delete, &deleteNetworkResponse{})
+}
+
+func (d *driver) CreateEndpoint(nid, eid types.UUID, epInfo driverapi.EndpointInfo, epOptions map[string]interface{}) error {
+ if epInfo == nil {
+ return fmt.Errorf("must not be called with nil EndpointInfo")
+ }
+
+ reqIfaces := make([]*endpointInterface, len(epInfo.Interfaces()))
+ for i, iface := range epInfo.Interfaces() {
+ addr4 := iface.Address()
+ addr6 := iface.AddressIPv6()
+ reqIfaces[i] = &endpointInterface{
+ ID: iface.ID(),
+ Address: addr4.String(),
+ AddressIPv6: addr6.String(),
+ MacAddress: iface.MacAddress().String(),
+ }
+ }
+ create := &createEndpointRequest{
+ NetworkID: string(nid),
+ EndpointID: string(eid),
+ Interfaces: reqIfaces,
+ Options: epOptions,
+ }
+ var res createEndpointResponse
+ if err := d.call("CreateEndpoint", create, &res); err != nil {
+ return err
+ }
+
+ ifaces, err := res.parseInterfaces()
+ if err != nil {
+ return err
+ }
+ if len(reqIfaces) > 0 && len(ifaces) > 0 {
+ // We're not supposed to add interfaces if there already are
+ // some. Attempt to roll back
+ return errorWithRollback("driver attempted to add more interfaces", d.DeleteEndpoint(nid, eid))
+ }
+ for _, iface := range ifaces {
+ var addr4, addr6 net.IPNet
+ if iface.Address != nil {
+ addr4 = *(iface.Address)
+ }
+ if iface.AddressIPv6 != nil {
+ addr6 = *(iface.AddressIPv6)
+ }
+ if err := epInfo.AddInterface(iface.ID, iface.MacAddress, addr4, addr6); err != nil {
+ return errorWithRollback(fmt.Sprintf("failed to AddInterface %v: %s", iface, err), d.DeleteEndpoint(nid, eid))
+ }
+ }
+ return nil
+}
+
+func errorWithRollback(msg string, err error) error {
+ rollback := "rolled back"
+ if err != nil {
+ rollback = "failed to roll back: " + err.Error()
+ }
+ return fmt.Errorf("%s; %s", msg, rollback)
+}
+
+func (d *driver) DeleteEndpoint(nid, eid types.UUID) error {
+ delete := &deleteEndpointRequest{
+ NetworkID: string(nid),
+ EndpointID: string(eid),
+ }
+ return d.call("DeleteEndpoint", delete, &deleteEndpointResponse{})
+}
+
+func (d *driver) EndpointOperInfo(nid, eid types.UUID) (map[string]interface{}, error) {
+ info := &endpointInfoRequest{
+ NetworkID: string(nid),
+ EndpointID: string(eid),
+ }
+ var res endpointInfoResponse
+ if err := d.call("EndpointOperInfo", info, &res); err != nil {
+ return nil, err
+ }
+ return res.Value, nil
+}
+
+// Join method is invoked when a Sandbox is attached to an endpoint.
+func (d *driver) Join(nid, eid types.UUID, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
+ join := &joinRequest{
+ NetworkID: string(nid),
+ EndpointID: string(eid),
+ SandboxKey: sboxKey,
+ Options: options,
+ }
+ var (
+ res joinResponse
+ err error
+ )
+ if err = d.call("Join", join, &res); err != nil {
+ return err
+ }
+
+ // Expect each interface ID given by CreateEndpoint to have an
+ // entry at that index in the names supplied here. In other words,
+ // if you supply 0..n interfaces with IDs 0..n above, you should
+ // supply the names in the same order.
+ ifaceNames := res.InterfaceNames
+ for _, iface := range jinfo.InterfaceNames() {
+ i := iface.ID()
+ if i >= len(ifaceNames) || i < 0 {
+ return fmt.Errorf("no correlating interface %d in supplied interface names", i)
+ }
+ supplied := ifaceNames[i]
+ if err := iface.SetNames(supplied.SrcName, supplied.DstName); err != nil {
+ return errorWithRollback(fmt.Sprintf("failed to set interface name: %s", err), d.Leave(nid, eid))
+ }
+ }
+
+ var addr net.IP
+ if res.Gateway != "" {
+ if addr = net.ParseIP(res.Gateway); addr == nil {
+ return fmt.Errorf(`unable to parse Gateway "%s"`, res.Gateway)
+ }
+ if jinfo.SetGateway(addr) != nil {
+ return errorWithRollback(fmt.Sprintf("failed to set gateway: %v", addr), d.Leave(nid, eid))
+ }
+ }
+ if res.GatewayIPv6 != "" {
+ if addr = net.ParseIP(res.GatewayIPv6); addr == nil {
+ return fmt.Errorf(`unable to parse GatewayIPv6 "%s"`, res.GatewayIPv6)
+ }
+ if jinfo.SetGatewayIPv6(addr) != nil {
+ return errorWithRollback(fmt.Sprintf("failed to set gateway IPv6: %v", addr), d.Leave(nid, eid))
+ }
+ }
+ if jinfo.SetHostsPath(res.HostsPath) != nil {
+ return errorWithRollback(fmt.Sprintf("failed to set hosts path: %s", res.HostsPath), d.Leave(nid, eid))
+ }
+ if jinfo.SetResolvConfPath(res.ResolvConfPath) != nil {
+ return errorWithRollback(fmt.Sprintf("failed to set resolv.conf path: %s", res.ResolvConfPath), d.Leave(nid, eid))
+ }
+ return nil
+}
+
+// Leave method is invoked when a Sandbox detaches from an endpoint.
+func (d *driver) Leave(nid, eid types.UUID) error {
+ leave := &leaveRequest{
+ NetworkID: string(nid),
+ EndpointID: string(eid),
+ }
+ return d.call("Leave", leave, &leaveResponse{})
+}
+
+func (d *driver) Type() string {
+ return d.networkType
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/remote/driver_test.go b/vendor/src/github.com/docker/libnetwork/drivers/remote/driver_test.go
new file mode 100644
index 0000000000..a9fb8b4c16
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/remote/driver_test.go
@@ -0,0 +1,397 @@
+package remote
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "testing"
+
+ "github.com/docker/docker/pkg/plugins"
+ "github.com/docker/libnetwork/driverapi"
+ _ "github.com/docker/libnetwork/netutils"
+ "github.com/docker/libnetwork/types"
+)
+
+func decodeToMap(r *http.Request) (res map[string]interface{}, err error) {
+ err = json.NewDecoder(r.Body).Decode(&res)
+ return
+}
+
+func handle(t *testing.T, mux *http.ServeMux, method string, h func(map[string]interface{}) interface{}) {
+ mux.HandleFunc(fmt.Sprintf("/%s.%s", driverapi.NetworkPluginEndpointType, method), func(w http.ResponseWriter, r *http.Request) {
+ ask, err := decodeToMap(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ answer := h(ask)
+ err = json.NewEncoder(w).Encode(&answer)
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+}
+
+func setupPlugin(t *testing.T, name string, mux *http.ServeMux) func() {
+ if err := os.MkdirAll("/usr/share/docker/plugins", 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ listener, err := net.Listen("unix", fmt.Sprintf("/usr/share/docker/plugins/%s.sock", name))
+ if err != nil {
+ t.Fatal("Could not listen to the plugin socket")
+ }
+
+ mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintf(w, `{"Implements": ["%s"]}`, driverapi.NetworkPluginEndpointType)
+ })
+
+ go http.Serve(listener, mux)
+
+ return func() {
+ listener.Close()
+ if err := os.RemoveAll("/usr/share/docker/plugins"); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+type testEndpoint struct {
+ t *testing.T
+ id int
+ src string
+ dst string
+ address string
+ addressIPv6 string
+ macAddress string
+ gateway string
+ gatewayIPv6 string
+ resolvConfPath string
+ hostsPath string
+}
+
+func (test *testEndpoint) Interfaces() []driverapi.InterfaceInfo {
+ // return an empty one so we don't trip the check for existing
+ // interfaces; we don't care about this after that
+ return []driverapi.InterfaceInfo{}
+}
+
+func (test *testEndpoint) AddInterface(ID int, mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error {
+ if ID != test.id {
+ test.t.Fatalf("Wrong ID passed to AddInterface: %d", ID)
+ }
+ ip4, net4, _ := net.ParseCIDR(test.address)
+ ip6, net6, _ := net.ParseCIDR(test.addressIPv6)
+ if ip4 != nil {
+ net4.IP = ip4
+ if !types.CompareIPNet(net4, &ipv4) {
+ test.t.Fatalf("Wrong address given %+v", ipv4)
+ }
+ }
+ if ip6 != nil {
+ net6.IP = ip6
+ if !types.CompareIPNet(net6, &ipv6) {
+ test.t.Fatalf("Wrong address (IPv6) given %+v", ipv6)
+ }
+ }
+ if test.macAddress != "" && mac.String() != test.macAddress {
+ test.t.Fatalf("Wrong MAC address given %v", mac)
+ }
+ return nil
+}
+
+func (test *testEndpoint) InterfaceNames() []driverapi.InterfaceNameInfo {
+ return []driverapi.InterfaceNameInfo{test}
+}
+
+func compareIPs(t *testing.T, kind string, shouldBe string, supplied net.IP) {
+ ip := net.ParseIP(shouldBe)
+ if ip == nil {
+ t.Fatalf(`Invalid IP to test against: "%s"`, shouldBe)
+ }
+ if !ip.Equal(supplied) {
+ t.Fatalf(`%s IPs are not equal: expected "%s", got %v`, kind, shouldBe, supplied)
+ }
+}
+
+func (test *testEndpoint) SetGateway(ipv4 net.IP) error {
+ compareIPs(test.t, "Gateway", test.gateway, ipv4)
+ return nil
+}
+
+func (test *testEndpoint) SetGatewayIPv6(ipv6 net.IP) error {
+ compareIPs(test.t, "GatewayIPv6", test.gatewayIPv6, ipv6)
+ return nil
+}
+
+func (test *testEndpoint) SetHostsPath(p string) error {
+ if p != test.hostsPath {
+ test.t.Fatalf(`Wrong HostsPath; expected "%s", got "%s"`, test.hostsPath, p)
+ }
+ return nil
+}
+
+func (test *testEndpoint) SetResolvConfPath(p string) error {
+ if p != test.resolvConfPath {
+ test.t.Fatalf(`Wrong ResolvConfPath; expected "%s", got "%s"`, test.resolvConfPath, p)
+ }
+ return nil
+}
+
+func (test *testEndpoint) SetNames(src string, dst string) error {
+ if test.src != src {
+ test.t.Fatalf(`Wrong SrcName; expected "%s", got "%s"`, test.src, src)
+ }
+ if test.dst != dst {
+ test.t.Fatalf(`Wrong DstName; expected "%s", got "%s"`, test.dst, dst)
+ }
+ return nil
+}
+
+func (test *testEndpoint) ID() int {
+ return test.id
+}
+
+func TestRemoteDriver(t *testing.T) {
+ var plugin = "test-net-driver"
+
+ ep := &testEndpoint{
+ t: t,
+ src: "vethsrc",
+ dst: "vethdst",
+ address: "192.168.5.7/16",
+ addressIPv6: "2001:DB8::5:7/48",
+ macAddress: "7a:56:78:34:12:da",
+ gateway: "192.168.0.1",
+ gatewayIPv6: "2001:DB8::1",
+ hostsPath: "/here/comes/the/host/path",
+ resolvConfPath: "/there/goes/the/resolv/conf",
+ }
+
+ mux := http.NewServeMux()
+ defer setupPlugin(t, plugin, mux)()
+
+ var networkID string
+
+ handle(t, mux, "CreateNetwork", func(msg map[string]interface{}) interface{} {
+ nid := msg["NetworkID"]
+ var ok bool
+ if networkID, ok = nid.(string); !ok {
+ t.Fatal("RPC did not include network ID string")
+ }
+ return map[string]interface{}{}
+ })
+ handle(t, mux, "DeleteNetwork", func(msg map[string]interface{}) interface{} {
+ if nid, ok := msg["NetworkID"]; !ok || nid != networkID {
+ t.Fatal("Network ID missing or does not match that created")
+ }
+ return map[string]interface{}{}
+ })
+ handle(t, mux, "CreateEndpoint", func(msg map[string]interface{}) interface{} {
+ iface := map[string]interface{}{
+ "ID": ep.id,
+ "Address": ep.address,
+ "AddressIPv6": ep.addressIPv6,
+ "MacAddress": ep.macAddress,
+ }
+ return map[string]interface{}{
+ "Interfaces": []interface{}{iface},
+ }
+ })
+ handle(t, mux, "Join", func(msg map[string]interface{}) interface{} {
+ options := msg["Options"].(map[string]interface{})
+ foo, ok := options["foo"].(string)
+ if !ok || foo != "fooValue" {
+ t.Fatalf("Did not receive expected foo string in request options: %+v", msg)
+ }
+ return map[string]interface{}{
+ "Gateway": ep.gateway,
+ "GatewayIPv6": ep.gatewayIPv6,
+ "HostsPath": ep.hostsPath,
+ "ResolvConfPath": ep.resolvConfPath,
+ "InterfaceNames": []map[string]interface{}{
+ map[string]interface{}{
+ "SrcName": ep.src,
+ "DstName": ep.dst,
+ },
+ },
+ }
+ })
+ handle(t, mux, "Leave", func(msg map[string]interface{}) interface{} {
+ return map[string]string{}
+ })
+ handle(t, mux, "DeleteEndpoint", func(msg map[string]interface{}) interface{} {
+ return map[string]interface{}{}
+ })
+ handle(t, mux, "EndpointOperInfo", func(msg map[string]interface{}) interface{} {
+ return map[string]interface{}{
+ "Value": map[string]string{
+ "Arbitrary": "key",
+ "Value": "pairs?",
+ },
+ }
+ })
+
+ p, err := plugins.Get(plugin, driverapi.NetworkPluginEndpointType)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ driver := newDriver(plugin, p.Client)
+ if driver.Type() != plugin {
+ t.Fatal("Driver type does not match that given")
+ }
+
+ netID := types.UUID("dummy-network")
+ err = driver.CreateNetwork(netID, map[string]interface{}{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ endID := types.UUID("dummy-endpoint")
+ err = driver.CreateEndpoint(netID, endID, ep, map[string]interface{}{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ joinOpts := map[string]interface{}{"foo": "fooValue"}
+ err = driver.Join(netID, endID, "sandbox-key", ep, joinOpts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err = driver.EndpointOperInfo(netID, endID); err != nil {
+ t.Fatal(err)
+ }
+ if err = driver.Leave(netID, endID); err != nil {
+ t.Fatal(err)
+ }
+ if err = driver.DeleteEndpoint(netID, endID); err != nil {
+ t.Fatal(err)
+ }
+ if err = driver.DeleteNetwork(netID); err != nil {
+ t.Fatal(err)
+ }
+}
+
+type failEndpoint struct {
+ t *testing.T
+}
+
+func (f *failEndpoint) Interfaces() []*driverapi.InterfaceInfo {
+ f.t.Fatal("Unexpected call of Interfaces")
+ return nil
+}
+func (f *failEndpoint) AddInterface(int, net.HardwareAddr, net.IPNet, net.IPNet) error {
+ f.t.Fatal("Unexpected call of AddInterface")
+ return nil
+}
+
+func TestDriverError(t *testing.T) {
+ var plugin = "test-net-driver-error"
+
+ mux := http.NewServeMux()
+ defer setupPlugin(t, plugin, mux)()
+
+ handle(t, mux, "CreateEndpoint", func(msg map[string]interface{}) interface{} {
+ return map[string]interface{}{
+ "Err": "this should get raised as an error",
+ }
+ })
+
+ p, err := plugins.Get(plugin, driverapi.NetworkPluginEndpointType)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ driver := newDriver(plugin, p.Client)
+
+ if err := driver.CreateEndpoint(types.UUID("dummy"), types.UUID("dummy"), &testEndpoint{t: t}, map[string]interface{}{}); err == nil {
+ t.Fatalf("Expected error from driver")
+ }
+}
+
+func TestMissingValues(t *testing.T) {
+ var plugin = "test-net-driver-missing"
+
+ mux := http.NewServeMux()
+ defer setupPlugin(t, plugin, mux)()
+
+ ep := &testEndpoint{
+ t: t,
+ id: 0,
+ }
+
+ handle(t, mux, "CreateEndpoint", func(msg map[string]interface{}) interface{} {
+ iface := map[string]interface{}{
+ "ID": ep.id,
+ "Address": ep.address,
+ "AddressIPv6": ep.addressIPv6,
+ "MacAddress": ep.macAddress,
+ }
+ return map[string]interface{}{
+ "Interfaces": []interface{}{iface},
+ }
+ })
+
+ p, err := plugins.Get(plugin, driverapi.NetworkPluginEndpointType)
+ if err != nil {
+ t.Fatal(err)
+ }
+ driver := newDriver(plugin, p.Client)
+
+ if err := driver.CreateEndpoint(types.UUID("dummy"), types.UUID("dummy"), ep, map[string]interface{}{}); err != nil {
+ t.Fatal(err)
+ }
+}
+
+type rollbackEndpoint struct {
+}
+
+func (r *rollbackEndpoint) Interfaces() []driverapi.InterfaceInfo {
+ return []driverapi.InterfaceInfo{}
+}
+
+func (r *rollbackEndpoint) AddInterface(_ int, _ net.HardwareAddr, _ net.IPNet, _ net.IPNet) error {
+ return fmt.Errorf("fail this to trigger a rollback")
+}
+
+func TestRollback(t *testing.T) {
+ var plugin = "test-net-driver-rollback"
+
+ mux := http.NewServeMux()
+ defer setupPlugin(t, plugin, mux)()
+
+ rolledback := false
+
+ handle(t, mux, "CreateEndpoint", func(msg map[string]interface{}) interface{} {
+ iface := map[string]interface{}{
+ "ID": 0,
+ "Address": "192.168.4.5/16",
+ "AddressIPv6": "",
+ "MacAddress": "7a:12:34:56:78:90",
+ }
+ return map[string]interface{}{
+ "Interfaces": []interface{}{iface},
+ }
+ })
+ handle(t, mux, "DeleteEndpoint", func(msg map[string]interface{}) interface{} {
+ rolledback = true
+ return map[string]interface{}{}
+ })
+
+ p, err := plugins.Get(plugin, driverapi.NetworkPluginEndpointType)
+ if err != nil {
+ t.Fatal(err)
+ }
+ driver := newDriver(plugin, p.Client)
+
+ ep := &rollbackEndpoint{}
+
+ if err := driver.CreateEndpoint(types.UUID("dummy"), types.UUID("dummy"), ep, map[string]interface{}{}); err == nil {
+ t.Fatalf("Expected error from driver")
+ }
+ if !rolledback {
+ t.Fatalf("Expected to have had DeleteEndpoint called")
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/remote/messages.go b/vendor/src/github.com/docker/libnetwork/drivers/remote/messages.go
new file mode 100644
index 0000000000..8e03a16daf
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/remote/messages.go
@@ -0,0 +1,143 @@
+package remote
+
+import "net"
+
+type response struct {
+ Err string
+}
+
+type maybeError interface {
+ getError() string
+}
+
+func (r *response) getError() string {
+ return r.Err
+}
+
+type createNetworkRequest struct {
+ NetworkID string
+ Options map[string]interface{}
+}
+
+type createNetworkResponse struct {
+ response
+}
+
+type deleteNetworkRequest struct {
+ NetworkID string
+}
+
+type deleteNetworkResponse struct {
+ response
+}
+
+type createEndpointRequest struct {
+ NetworkID string
+ EndpointID string
+ Interfaces []*endpointInterface
+ Options map[string]interface{}
+}
+
+type endpointInterface struct {
+ ID int
+ Address string
+ AddressIPv6 string
+ MacAddress string
+}
+
+type createEndpointResponse struct {
+ response
+ Interfaces []*endpointInterface
+}
+
+func toAddr(ipAddr string) (*net.IPNet, error) {
+ ip, ipnet, err := net.ParseCIDR(ipAddr)
+ if err != nil {
+ return nil, err
+ }
+ ipnet.IP = ip
+ return ipnet, nil
+}
+
+type iface struct {
+ ID int
+ Address *net.IPNet
+ AddressIPv6 *net.IPNet
+ MacAddress net.HardwareAddr
+}
+
+func (r *createEndpointResponse) parseInterfaces() ([]*iface, error) {
+ var (
+ ifaces = make([]*iface, len(r.Interfaces))
+ )
+ for i, inIf := range r.Interfaces {
+ var err error
+ outIf := &iface{ID: inIf.ID}
+ if inIf.Address != "" {
+ if outIf.Address, err = toAddr(inIf.Address); err != nil {
+ return nil, err
+ }
+ }
+ if inIf.AddressIPv6 != "" {
+ if outIf.AddressIPv6, err = toAddr(inIf.AddressIPv6); err != nil {
+ return nil, err
+ }
+ }
+ if inIf.MacAddress != "" {
+ if outIf.MacAddress, err = net.ParseMAC(inIf.MacAddress); err != nil {
+ return nil, err
+ }
+ }
+ ifaces[i] = outIf
+ }
+ return ifaces, nil
+}
+
+type deleteEndpointRequest struct {
+ NetworkID string
+ EndpointID string
+}
+
+type deleteEndpointResponse struct {
+ response
+}
+
+type endpointInfoRequest struct {
+ NetworkID string
+ EndpointID string
+}
+
+type endpointInfoResponse struct {
+ response
+ Value map[string]interface{}
+}
+
+type joinRequest struct {
+ NetworkID string
+ EndpointID string
+ SandboxKey string
+ Options map[string]interface{}
+}
+
+type ifaceName struct {
+ SrcName string
+ DstName string
+}
+
+type joinResponse struct {
+ response
+ InterfaceNames []*ifaceName
+ Gateway string
+ GatewayIPv6 string
+ HostsPath string
+ ResolvConfPath string
+}
+
+type leaveRequest struct {
+ NetworkID string
+ EndpointID string
+}
+
+type leaveResponse struct {
+ response
+}
diff --git a/vendor/src/github.com/docker/libnetwork/endpoint.go b/vendor/src/github.com/docker/libnetwork/endpoint.go
new file mode 100644
index 0000000000..9b832358e8
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/endpoint.go
@@ -0,0 +1,728 @@
+package libnetwork
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "sync"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/libnetwork/etchosts"
+ "github.com/docker/libnetwork/netlabel"
+ "github.com/docker/libnetwork/resolvconf"
+ "github.com/docker/libnetwork/sandbox"
+ "github.com/docker/libnetwork/types"
+)
+
+// Endpoint represents a logical connection between a network and a sandbox.
+type Endpoint interface {
+ // A system generated id for this endpoint.
+ ID() string
+
+ // Name returns the name of this endpoint.
+ Name() string
+
+ // Network returns the name of the network to which this endpoint is attached.
+ Network() string
+
+ // Join creates a new sandbox for the given container ID and populates the
+ // network resources allocated for the endpoint and joins the sandbox to
+ // the endpoint. It returns the sandbox key to the caller
+ Join(containerID string, options ...EndpointOption) (*ContainerData, error)
+
+ // Leave removes the sandbox associated with container ID and detaches
+ // the network resources populated in the sandbox
+ Leave(containerID string, options ...EndpointOption) error
+
+ // Return certain operational data belonging to this endpoint
+ Info() EndpointInfo
+
+ // Info returns a collection of driver operational data related to this endpoint retrieved from the driver
+ DriverInfo() (map[string]interface{}, error)
+
+ // Delete and detaches this endpoint from the network.
+ Delete() error
+}
+
// EndpointOption is an option setter function type used to pass various options to Network
// and Endpoint interface methods. The various setter functions of type EndpointOption are
// provided by libnetwork; they look like Option[...](...)
type EndpointOption func(ep *endpoint)
+
+// ContainerData is a set of data returned when a container joins an endpoint.
+type ContainerData struct {
+ SandboxKey string
+}
+
+// These are the container configs used to customize container /etc/hosts file.
+type hostsPathConfig struct {
+ hostName string
+ domainName string
+ hostsPath string
+ extraHosts []extraHost
+ parentUpdates []parentUpdate
+}
+
+// These are the container configs used to customize container /etc/resolv.conf file.
+type resolvConfPathConfig struct {
+ resolvConfPath string
+ dnsList []string
+ dnsSearchList []string
+}
+
+type containerConfig struct {
+ hostsPathConfig
+ resolvConfPathConfig
+ generic map[string]interface{}
+ useDefaultSandBox bool
+}
+
+type extraHost struct {
+ name string
+ IP string
+}
+
+type parentUpdate struct {
+ eid string
+ name string
+ ip string
+}
+
+type containerInfo struct {
+ id string
+ config containerConfig
+ data ContainerData
+}
+
+type endpoint struct {
+ name string
+ id types.UUID
+ network *network
+ sandboxInfo *sandbox.Info
+ iFaces []*endpointInterface
+ joinInfo *endpointJoinInfo
+ container *containerInfo
+ exposedPorts []types.TransportPort
+ generic map[string]interface{}
+ joinLeaveDone chan struct{}
+ sync.Mutex
+}
+
+const defaultPrefix = "/var/lib/docker/network/files"
+
+func (ep *endpoint) ID() string {
+ ep.Lock()
+ defer ep.Unlock()
+
+ return string(ep.id)
+}
+
+func (ep *endpoint) Name() string {
+ ep.Lock()
+ defer ep.Unlock()
+
+ return ep.name
+}
+
+func (ep *endpoint) Network() string {
+ ep.Lock()
+ defer ep.Unlock()
+
+ return ep.network.name
+}
+
+func (ep *endpoint) processOptions(options ...EndpointOption) {
+ ep.Lock()
+ defer ep.Unlock()
+
+ for _, opt := range options {
+ if opt != nil {
+ opt(ep)
+ }
+ }
+}
+
+func createBasePath(dir string) error {
+ err := os.MkdirAll(dir, 0644)
+ if err != nil && !os.IsExist(err) {
+ return err
+ }
+
+ return nil
+}
+
+func createFile(path string) error {
+ var f *os.File
+
+ dir, _ := filepath.Split(path)
+ err := createBasePath(dir)
+ if err != nil {
+ return err
+ }
+
+ f, err = os.Create(path)
+ if err == nil {
+ f.Close()
+ }
+
+ return err
+}
+
// joinLeaveStart waits to ensure there are no joins or leaves in progress and
// marks this join/leave in progress without race
func (ep *endpoint) joinLeaveStart() {
	ep.Lock()
	defer ep.Unlock()

	// A non-nil joinLeaveDone means another join/leave is in flight.
	// Copy the channel, drop the lock while blocking on it, then
	// re-take the lock and re-check: another waiter may have won the
	// race and installed a fresh channel in the meantime.
	for ep.joinLeaveDone != nil {
		joinLeaveDone := ep.joinLeaveDone
		ep.Unlock()

		// Blocks until joinLeaveEnd closes the channel.
		select {
		case <-joinLeaveDone:
		}

		// Re-acquire before the loop condition; the deferred Unlock
		// pairs with this Lock on the final iteration.
		ep.Lock()
	}

	// Publish our own in-progress marker for subsequent callers.
	ep.joinLeaveDone = make(chan struct{})
}
+
// joinLeaveEnd marks the end of this join/leave operation and
// signals the same without race to other join and leave waiters
func (ep *endpoint) joinLeaveEnd() {
	ep.Lock()
	defer ep.Unlock()

	// Closing the channel wakes every goroutine parked in
	// joinLeaveStart; clearing the field lets exactly one of them
	// claim the next slot.
	if ep.joinLeaveDone != nil {
		close(ep.joinLeaveDone)
		ep.joinLeaveDone = nil
	}
}
+
// Join attaches the container identified by containerID to this
// endpoint: it asks the driver to join, generates the container's
// /etc/hosts and /etc/resolv.conf, creates (or reuses) the sandbox and
// plumbs the endpoint's interfaces and gateways into it. It returns
// the ContainerData holding the sandbox key. On any failure the
// container attachment is rolled back.
func (ep *endpoint) Join(containerID string, options ...EndpointOption) (*ContainerData, error) {
	var err error

	if containerID == "" {
		return nil, InvalidContainerIDError(containerID)
	}

	// Serialize against concurrent Join/Leave on this endpoint.
	ep.joinLeaveStart()
	defer ep.joinLeaveEnd()

	ep.Lock()
	// Only one container may be joined to an endpoint at a time.
	if ep.container != nil {
		ep.Unlock()
		return nil, ErrInvalidJoin{}
	}

	ep.container = &containerInfo{
		id: containerID,
		config: containerConfig{
			hostsPathConfig: hostsPathConfig{
				extraHosts:    []extraHost{},
				parentUpdates: []parentUpdate{},
			},
		}}

	ep.joinInfo = &endpointJoinInfo{}

	// Snapshot the fields we need; the remaining work runs unlocked.
	container := ep.container
	network := ep.network
	epid := ep.id
	joinInfo := ep.joinInfo
	ifaces := ep.iFaces

	ep.Unlock()
	// Roll back the attachment if anything below fails (err is the
	// named-in-closure error of this function).
	defer func() {
		ep.Lock()
		if err != nil {
			ep.container = nil
		}
		ep.Unlock()
	}()

	network.Lock()
	driver := network.driver
	nid := network.id
	ctrlr := network.ctrlr
	network.Unlock()

	// Options populate ep.container.config (hostname, DNS, hosts, ...).
	ep.processOptions(options...)

	// Containers opting into the default sandbox all share one key.
	sboxKey := sandbox.GenerateKey(containerID)
	if container.config.useDefaultSandBox {
		sboxKey = sandbox.GenerateKey("default")
	}

	err = driver.Join(nid, epid, sboxKey, ep, container.config.generic)
	if err != nil {
		return nil, err
	}

	err = ep.buildHostsFiles()
	if err != nil {
		return nil, err
	}

	err = ep.updateParentHosts()
	if err != nil {
		return nil, err
	}

	err = ep.setupDNS()
	if err != nil {
		return nil, err
	}

	sb, err := ctrlr.sandboxAdd(sboxKey, !container.config.useDefaultSandBox)
	if err != nil {
		return nil, err
	}
	// Drop the sandbox again if a later step fails.
	defer func() {
		if err != nil {
			ctrlr.sandboxRm(sboxKey)
		}
	}()

	for _, i := range ifaces {
		iface := &sandbox.Interface{
			SrcName: i.srcName,
			DstName: i.dstPrefix,
			Address: &i.addr,
		}
		// Only configure IPv6 when the driver assigned an address
		// (To16 returns nil for the zero IP).
		if i.addrv6.IP.To16() != nil {
			iface.AddressIPv6 = &i.addrv6
		}
		err = sb.AddInterface(iface)
		if err != nil {
			return nil, err
		}
	}

	err = sb.SetGateway(joinInfo.gw)
	if err != nil {
		return nil, err
	}

	err = sb.SetGatewayIPv6(joinInfo.gw6)
	if err != nil {
		return nil, err
	}

	container.data.SandboxKey = sb.Key()
	// Return a copy so callers cannot mutate our state.
	cData := container.data

	return &cData, nil
}
+
+func (ep *endpoint) Leave(containerID string, options ...EndpointOption) error {
+ var err error
+
+ ep.joinLeaveStart()
+ defer ep.joinLeaveEnd()
+
+ ep.processOptions(options...)
+
+ ep.Lock()
+ container := ep.container
+ n := ep.network
+
+ if container == nil || container.id == "" ||
+ containerID == "" || container.id != containerID {
+ if container == nil {
+ err = ErrNoContainer{}
+ } else {
+ err = InvalidContainerIDError(containerID)
+ }
+
+ ep.Unlock()
+ return err
+ }
+ ep.container = nil
+ ep.Unlock()
+
+ n.Lock()
+ driver := n.driver
+ ctrlr := n.ctrlr
+ n.Unlock()
+
+ err = driver.Leave(n.id, ep.id)
+
+ sb := ctrlr.sandboxGet(container.data.SandboxKey)
+ for _, i := range sb.Interfaces() {
+ err = sb.RemoveInterface(i)
+ if err != nil {
+ logrus.Debugf("Remove interface failed: %v", err)
+ }
+ }
+
+ ctrlr.sandboxRm(container.data.SandboxKey)
+
+ return err
+}
+
// Delete removes this endpoint from its network and asks the driver to
// delete it. It fails if a container is still joined or if the network
// no longer knows the endpoint. The endpoint is removed from the
// network map optimistically and restored if the driver call fails.
func (ep *endpoint) Delete() error {
	var err error

	ep.Lock()
	epid := ep.id
	name := ep.name
	// Refuse to delete while a container is joined.
	if ep.container != nil {
		ep.Unlock()
		return &ActiveContainerError{name: name, id: string(epid)}
	}

	n := ep.network
	ep.Unlock()

	n.Lock()
	_, ok := n.endpoints[epid]
	if !ok {
		n.Unlock()
		return &UnknownEndpointError{name: name, id: string(epid)}
	}

	nid := n.id
	driver := n.driver
	// Optimistic removal; rolled back below on driver failure.
	delete(n.endpoints, epid)
	n.Unlock()
	defer func() {
		if err != nil {
			n.Lock()
			n.endpoints[epid] = ep
			n.Unlock()
		}
	}()

	err = driver.DeleteEndpoint(nid, epid)
	return err
}
+
// buildHostsFiles creates the container's /etc/hosts file. If the
// driver supplied a hosts file via joinInfo it is copied verbatim;
// otherwise a file is generated from the container's hostname/domain,
// the extra-host entries and the IP of the endpoint's first interface.
func (ep *endpoint) buildHostsFiles() error {
	var extraContent []etchosts.Record

	ep.Lock()
	container := ep.container
	joinInfo := ep.joinInfo
	ifaces := ep.iFaces
	ep.Unlock()

	if container == nil {
		return ErrNoContainer{}
	}

	// Default location under /var/lib/docker/network/files/<id>/.
	if container.config.hostsPath == "" {
		container.config.hostsPath = defaultPrefix + "/" + container.id + "/hosts"
	}

	dir, _ := filepath.Split(container.config.hostsPath)
	err := createBasePath(dir)
	if err != nil {
		return err
	}

	// Driver-provided hosts file wins: copy it and stop.
	if joinInfo != nil && joinInfo.hostsPath != "" {
		content, err := ioutil.ReadFile(joinInfo.hostsPath)
		if err != nil && !os.IsNotExist(err) {
			return err
		}

		if err == nil {
			return ioutil.WriteFile(container.config.hostsPath, content, 0644)
		}
	}

	// NOTE(review): `name` is computed but never used below —
	// etchosts.Build receives hostName and domainName separately.
	// Looks like Build was meant to take the FQDN; confirm against
	// etchosts.Build before removing or wiring it through.
	name := container.config.hostName
	if container.config.domainName != "" {
		name = name + "." + container.config.domainName
	}

	for _, extraHost := range container.config.extraHosts {
		extraContent = append(extraContent,
			etchosts.Record{Hosts: extraHost.name, IP: extraHost.IP})
	}

	// Use the first interface's IPv4 address for the container's own
	// hostname record, if any interface exists.
	IP := ""
	if len(ifaces) != 0 && ifaces[0] != nil {
		IP = ifaces[0].addr.IP.String()
	}

	return etchosts.Build(container.config.hostsPath, IP, container.config.hostName,
		container.config.domainName, extraContent)
}
+
// updateParentHosts propagates this container's name/IP to the hosts
// files of linked "parent" endpoints recorded via
// JoinOptionParentUpdate. Unknown endpoint ids are skipped silently.
func (ep *endpoint) updateParentHosts() error {
	ep.Lock()
	container := ep.container
	network := ep.network
	ep.Unlock()

	if container == nil {
		return ErrNoContainer{}
	}

	for _, update := range container.config.parentUpdates {
		// Look up the parent endpoint under the network lock, but do
		// the file update outside it.
		network.Lock()
		pep, ok := network.endpoints[types.UUID(update.eid)]
		if !ok {
			network.Unlock()
			continue
		}
		network.Unlock()

		pep.Lock()
		pContainer := pep.container
		pep.Unlock()

		// Only parents that currently have a joined container get
		// their hosts file rewritten.
		if pContainer != nil {
			if err := etchosts.Update(pContainer.config.hostsPath, update.ip, update.name); err != nil {
				return err
			}
		}
	}

	return nil
}
+
// updateDNS writes resolvConf (the host's resolv.conf content) to the
// container's resolv.conf, filtered for localhost/IPv6 entries, and
// records a hash of what was written. If the existing file no longer
// matches the recorded hash the user has edited it, and the update is
// skipped. The write is made atomic via temp files + os.Rename.
func (ep *endpoint) updateDNS(resolvConf []byte) error {
	ep.Lock()
	container := ep.container
	network := ep.network
	ep.Unlock()

	if container == nil {
		return ErrNoContainer{}
	}

	oldHash := []byte{}
	hashFile := container.config.resolvConfPath + ".hash"

	resolvBytes, err := ioutil.ReadFile(container.config.resolvConfPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
	} else {
		// A missing hash file just means we never wrote one; treat as
		// "no previous hash".
		oldHash, err = ioutil.ReadFile(hashFile)
		if err != nil {
			if !os.IsNotExist(err) {
				return err
			}

			oldHash = []byte{}
		}
	}

	curHash, err := ioutils.HashData(bytes.NewReader(resolvBytes))
	if err != nil {
		return err
	}

	if string(oldHash) != "" && curHash != string(oldHash) {
		// Seems the user has changed the container resolv.conf since the last time
		// we checked so return without doing anything.
		return nil
	}

	// replace any localhost/127.* and remove IPv6 nameservers if IPv6 disabled.
	resolvConf, _ = resolvconf.FilterResolvDNS(resolvConf, network.enableIPv6)

	newHash, err := ioutils.HashData(bytes.NewReader(resolvConf))
	if err != nil {
		return err
	}

	// for atomic updates to these files, use temporary files with os.Rename:
	// NOTE(review): the two temp files are never Close()d, and on the
	// error paths below they are left behind on disk — fd and temp-file
	// leak; confirm and clean up (defer Close + os.Remove on failure).
	dir := path.Dir(container.config.resolvConfPath)
	tmpHashFile, err := ioutil.TempFile(dir, "hash")
	if err != nil {
		return err
	}
	tmpResolvFile, err := ioutil.TempFile(dir, "resolv")
	if err != nil {
		return err
	}

	// Change the perms to 0644 since ioutil.TempFile creates it by default as 0600
	if err := os.Chmod(tmpResolvFile.Name(), 0644); err != nil {
		return err
	}

	// write the updates to the temp files
	if err = ioutil.WriteFile(tmpHashFile.Name(), []byte(newHash), 0644); err != nil {
		return err
	}
	if err = ioutil.WriteFile(tmpResolvFile.Name(), resolvConf, 0644); err != nil {
		return err
	}

	// rename the temp files for atomic replace
	if err = os.Rename(tmpHashFile.Name(), hashFile); err != nil {
		return err
	}
	return os.Rename(tmpResolvFile.Name(), container.config.resolvConfPath)
}
+
// setupDNS creates the container's resolv.conf. If the caller supplied
// explicit DNS servers or search domains they are merged with the
// host's and written directly; otherwise the host's resolv.conf is
// propagated via updateDNS (which filters and hashes it).
func (ep *endpoint) setupDNS() error {
	ep.Lock()
	container := ep.container
	ep.Unlock()

	if container == nil {
		return ErrNoContainer{}
	}

	// Default location under /var/lib/docker/network/files/<id>/.
	if container.config.resolvConfPath == "" {
		container.config.resolvConfPath = defaultPrefix + "/" + container.id + "/resolv.conf"
	}

	dir, _ := filepath.Split(container.config.resolvConfPath)
	err := createBasePath(dir)
	if err != nil {
		return err
	}

	// Host's resolv.conf is the baseline.
	resolvConf, err := resolvconf.Get()
	if err != nil {
		return err
	}

	if len(container.config.dnsList) > 0 ||
		len(container.config.dnsSearchList) > 0 {
		var (
			dnsList       = resolvconf.GetNameservers(resolvConf)
			dnsSearchList = resolvconf.GetSearchDomains(resolvConf)
		)

		// User-provided entries fully replace the host's, per list.
		if len(container.config.dnsList) > 0 {
			dnsList = container.config.dnsList
		}

		if len(container.config.dnsSearchList) > 0 {
			dnsSearchList = container.config.dnsSearchList
		}

		return resolvconf.Build(container.config.resolvConfPath, dnsList, dnsSearchList)
	}

	return ep.updateDNS(resolvConf)
}
+
// EndpointOptionGeneric function returns an option setter for a Generic option defined
// in a Dictionary of Key-Value pair.
// NOTE(review): writes into ep.generic; assumes the map was initialized
// by endpoint construction (a nil map write would panic) — TODO confirm.
func EndpointOptionGeneric(generic map[string]interface{}) EndpointOption {
	return func(ep *endpoint) {
		for k, v := range generic {
			ep.generic[k] = v
		}
	}
}

// JoinOptionHostname function returns an option setter for hostname option to
// be passed to endpoint Join method.
// NOTE(review): all JoinOption* setters dereference ep.container and so
// must only be passed to Join (which creates it), never to
// CreateEndpoint — TODO confirm.
func JoinOptionHostname(name string) EndpointOption {
	return func(ep *endpoint) {
		ep.container.config.hostName = name
	}
}

// JoinOptionDomainname function returns an option setter for domainname option to
// be passed to endpoint Join method.
func JoinOptionDomainname(name string) EndpointOption {
	return func(ep *endpoint) {
		ep.container.config.domainName = name
	}
}

// JoinOptionHostsPath function returns an option setter for hostspath option to
// be passed to endpoint Join method.
func JoinOptionHostsPath(path string) EndpointOption {
	return func(ep *endpoint) {
		ep.container.config.hostsPath = path
	}
}

// JoinOptionExtraHost function returns an option setter for extra /etc/hosts options
// which is a name and IP as strings.
func JoinOptionExtraHost(name string, IP string) EndpointOption {
	return func(ep *endpoint) {
		ep.container.config.extraHosts = append(ep.container.config.extraHosts, extraHost{name: name, IP: IP})
	}
}

// JoinOptionParentUpdate function returns an option setter for parent container
// which needs to update the IP address for the linked container.
func JoinOptionParentUpdate(eid string, name, ip string) EndpointOption {
	return func(ep *endpoint) {
		ep.container.config.parentUpdates = append(ep.container.config.parentUpdates, parentUpdate{eid: eid, name: name, ip: ip})
	}
}

// JoinOptionResolvConfPath function returns an option setter for resolvconfpath option to
// be passed to endpoint Join method.
func JoinOptionResolvConfPath(path string) EndpointOption {
	return func(ep *endpoint) {
		ep.container.config.resolvConfPath = path
	}
}

// JoinOptionDNS function returns an option setter for dns entry option to
// be passed to endpoint Join method.
func JoinOptionDNS(dns string) EndpointOption {
	return func(ep *endpoint) {
		ep.container.config.dnsList = append(ep.container.config.dnsList, dns)
	}
}

// JoinOptionDNSSearch function returns an option setter for dns search entry option to
// be passed to endpoint Join method.
func JoinOptionDNSSearch(search string) EndpointOption {
	return func(ep *endpoint) {
		ep.container.config.dnsSearchList = append(ep.container.config.dnsSearchList, search)
	}
}

// JoinOptionUseDefaultSandbox function returns an option setter for using default sandbox to
// be passed to endpoint Join method.
func JoinOptionUseDefaultSandbox() EndpointOption {
	return func(ep *endpoint) {
		ep.container.config.useDefaultSandBox = true
	}
}

// CreateOptionExposedPorts function returns an option setter for the container exposed
// ports option to be passed to network.CreateEndpoint() method.
func CreateOptionExposedPorts(exposedPorts []types.TransportPort) EndpointOption {
	return func(ep *endpoint) {
		// Defensive copy
		eps := make([]types.TransportPort, len(exposedPorts))
		copy(eps, exposedPorts)
		// Store endpoint label and in generic because driver needs it
		ep.exposedPorts = eps
		ep.generic[netlabel.ExposedPorts] = eps
	}
}

// CreateOptionPortMapping function returns an option setter for the mapping
// ports option to be passed to network.CreateEndpoint() method.
func CreateOptionPortMapping(portBindings []types.PortBinding) EndpointOption {
	return func(ep *endpoint) {
		// Store a copy of the bindings as generic data to pass to the driver
		pbs := make([]types.PortBinding, len(portBindings))
		copy(pbs, portBindings)
		ep.generic[netlabel.PortMap] = pbs
	}
}

// JoinOptionGeneric function returns an option setter for Generic configuration
// that is not managed by libNetwork but can be used by the Drivers during the call to
// endpoint join method. Container Labels are a good example.
func JoinOptionGeneric(generic map[string]interface{}) EndpointOption {
	return func(ep *endpoint) {
		ep.container.config.generic = generic
	}
}
diff --git a/vendor/src/github.com/docker/libnetwork/endpoint_info.go b/vendor/src/github.com/docker/libnetwork/endpoint_info.go
new file mode 100644
index 0000000000..f04521595a
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/endpoint_info.go
@@ -0,0 +1,215 @@
+package libnetwork
+
+import (
+ "net"
+
+ "github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/types"
+)
+
// EndpointInfo provides an interface to retrieve network resources bound to the endpoint.
type EndpointInfo interface {
	// InterfaceList returns an interface list which were assigned to the endpoint
	// by the driver. This can be used after the endpoint has been created.
	InterfaceList() []InterfaceInfo

	// Gateway returns the IPv4 gateway assigned by the driver.
	// This will only return a valid value if a container has joined the endpoint.
	Gateway() net.IP

	// GatewayIPv6 returns the IPv6 gateway assigned by the driver.
	// This will only return a valid value if a container has joined the endpoint.
	GatewayIPv6() net.IP

	// SandboxKey returns the sandbox key for the container which has joined
	// the endpoint. If there is no container joined then this will return an
	// empty string.
	SandboxKey() string
}

// InterfaceInfo provides an interface to retrieve interface addresses bound to the endpoint.
type InterfaceInfo interface {
	// MacAddress returns the MAC address assigned to the endpoint.
	MacAddress() net.HardwareAddr

	// Address returns the IPv4 address assigned to the endpoint.
	Address() net.IPNet

	// AddressIPv6 returns the IPv6 address assigned to the endpoint.
	AddressIPv6() net.IPNet
}
+
+type endpointInterface struct {
+ id int
+ mac net.HardwareAddr
+ addr net.IPNet
+ addrv6 net.IPNet
+ srcName string
+ dstPrefix string
+}
+
+type endpointJoinInfo struct {
+ gw net.IP
+ gw6 net.IP
+ hostsPath string
+ resolvConfPath string
+}
+
// Info returns the endpoint itself, which implements EndpointInfo.
func (ep *endpoint) Info() EndpointInfo {
	return ep
}

// DriverInfo queries the network's driver for operational data about
// this endpoint.
func (ep *endpoint) DriverInfo() (map[string]interface{}, error) {
	ep.Lock()
	network := ep.network
	epid := ep.id
	ep.Unlock()

	network.Lock()
	driver := network.driver
	nid := network.id
	network.Unlock()

	return driver.EndpointOperInfo(nid, epid)
}
+
// InterfaceList returns the endpoint's interfaces as the public
// InterfaceInfo view (EndpointInfo implementation).
func (ep *endpoint) InterfaceList() []InterfaceInfo {
	ep.Lock()
	defer ep.Unlock()

	iList := make([]InterfaceInfo, len(ep.iFaces))

	// Same backing slice elements, re-typed to the interface.
	for i, iface := range ep.iFaces {
		iList[i] = iface
	}

	return iList
}

// Interfaces returns the endpoint's interfaces as the driver-facing
// driverapi.InterfaceInfo view.
func (ep *endpoint) Interfaces() []driverapi.InterfaceInfo {
	ep.Lock()
	defer ep.Unlock()

	iList := make([]driverapi.InterfaceInfo, len(ep.iFaces))

	for i, iface := range ep.iFaces {
		iList[i] = iface
	}

	return iList
}
+
// AddInterface records a new interface (id, MAC, IPv4/IPv6 addresses)
// on the endpoint, copying the address and MAC values so the caller's
// arguments are not aliased. Called by drivers during endpoint
// creation; always returns nil.
func (ep *endpoint) AddInterface(id int, mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error {
	ep.Lock()
	defer ep.Unlock()

	iface := &endpointInterface{
		id:     id,
		addr:   *types.GetIPNetCopy(&ipv4),
		addrv6: *types.GetIPNetCopy(&ipv6),
	}
	iface.mac = types.GetMacCopy(mac)

	ep.iFaces = append(ep.iFaces, iface)
	return nil
}
+
// ID returns the interface's numeric id within the endpoint.
func (i *endpointInterface) ID() int {
	return i.id
}

// MacAddress returns a copy of the interface's MAC address.
func (i *endpointInterface) MacAddress() net.HardwareAddr {
	return types.GetMacCopy(i.mac)
}

// Address returns a copy of the interface's IPv4 address.
func (i *endpointInterface) Address() net.IPNet {
	return (*types.GetIPNetCopy(&i.addr))
}

// AddressIPv6 returns a copy of the interface's IPv6 address.
func (i *endpointInterface) AddressIPv6() net.IPNet {
	return (*types.GetIPNetCopy(&i.addrv6))
}

// SetNames records the source (host-side) and destination-prefix
// (container-side) interface names chosen by the driver at join time.
// Always returns nil.
func (i *endpointInterface) SetNames(srcName string, dstPrefix string) error {
	i.srcName = srcName
	i.dstPrefix = dstPrefix
	return nil
}
+
// InterfaceNames returns the endpoint's interfaces as the
// driverapi.InterfaceNameInfo view, through which drivers set
// src/dst names at join time.
func (ep *endpoint) InterfaceNames() []driverapi.InterfaceNameInfo {
	ep.Lock()
	defer ep.Unlock()

	iList := make([]driverapi.InterfaceNameInfo, len(ep.iFaces))

	for i, iface := range ep.iFaces {
		iList[i] = iface
	}

	return iList
}

// SandboxKey returns the sandbox key of the joined container, or ""
// when no container is joined.
func (ep *endpoint) SandboxKey() string {
	ep.Lock()
	defer ep.Unlock()

	if ep.container == nil {
		return ""
	}

	return ep.container.data.SandboxKey
}
+
// Gateway returns a copy of the IPv4 gateway set at join time, or an
// empty net.IP before any join.
func (ep *endpoint) Gateway() net.IP {
	ep.Lock()
	defer ep.Unlock()

	if ep.joinInfo == nil {
		return net.IP{}
	}

	return types.GetIPCopy(ep.joinInfo.gw)
}

// GatewayIPv6 returns a copy of the IPv6 gateway set at join time, or
// an empty net.IP before any join.
func (ep *endpoint) GatewayIPv6() net.IP {
	ep.Lock()
	defer ep.Unlock()

	if ep.joinInfo == nil {
		return net.IP{}
	}

	return types.GetIPCopy(ep.joinInfo.gw6)
}

// SetGateway stores a copy of the IPv4 gateway; called by the driver
// during Join. Always returns nil.
// NOTE(review): the Set* methods below dereference ep.joinInfo without
// a nil check (the getters do check) — presumably drivers only call
// them inside Join, after joinInfo is allocated; confirm.
func (ep *endpoint) SetGateway(gw net.IP) error {
	ep.Lock()
	defer ep.Unlock()

	ep.joinInfo.gw = types.GetIPCopy(gw)
	return nil
}

// SetGatewayIPv6 stores a copy of the IPv6 gateway; called by the
// driver during Join. Always returns nil.
func (ep *endpoint) SetGatewayIPv6(gw6 net.IP) error {
	ep.Lock()
	defer ep.Unlock()

	ep.joinInfo.gw6 = types.GetIPCopy(gw6)
	return nil
}

// SetHostsPath records a driver-provided hosts file to copy into the
// container (see buildHostsFiles). Always returns nil.
func (ep *endpoint) SetHostsPath(path string) error {
	ep.Lock()
	defer ep.Unlock()

	ep.joinInfo.hostsPath = path
	return nil
}

// SetResolvConfPath records a driver-provided resolv.conf path.
// Always returns nil.
func (ep *endpoint) SetResolvConfPath(path string) error {
	ep.Lock()
	defer ep.Unlock()

	ep.joinInfo.resolvConfPath = path
	return nil
}
diff --git a/vendor/src/github.com/docker/libnetwork/error.go b/vendor/src/github.com/docker/libnetwork/error.go
new file mode 100644
index 0000000000..a1cd01d678
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/error.go
@@ -0,0 +1,168 @@
+package libnetwork
+
+import (
+ "fmt"
+)
+
// ErrNoSuchNetwork is returned when a network query finds no result
type ErrNoSuchNetwork string

func (nsn ErrNoSuchNetwork) Error() string {
	return "network " + string(nsn) + " not found"
}

// BadRequest denotes the type of this error
func (nsn ErrNoSuchNetwork) BadRequest() {}
+
+// ErrNoSuchEndpoint is returned when a endpoint query finds no result
+type ErrNoSuchEndpoint string
+
+func (nse ErrNoSuchEndpoint) Error() string {
+ return fmt.Sprintf("endpoint %s not found", string(nse))
+}
+
+// BadRequest denotes the type of this error
+func (nse ErrNoSuchEndpoint) BadRequest() {}
+
+// ErrInvalidNetworkDriver is returned if an invalid driver
+// name is passed.
+type ErrInvalidNetworkDriver string
+
+func (ind ErrInvalidNetworkDriver) Error() string {
+ return fmt.Sprintf("invalid driver bound to network: %s", string(ind))
+}
+
+// BadRequest denotes the type of this error
+func (ind ErrInvalidNetworkDriver) BadRequest() {}
+
+// ErrInvalidJoin is returned if a join is attempted on an endpoint
+// which already has a container joined.
+type ErrInvalidJoin struct{}
+
+func (ij ErrInvalidJoin) Error() string {
+ return "a container has already joined the endpoint"
+}
+
+// BadRequest denotes the type of this error
+func (ij ErrInvalidJoin) BadRequest() {}
+
// ErrNoContainer is returned when the endpoint has no container
// attached to it.
type ErrNoContainer struct{}

func (nc ErrNoContainer) Error() string {
	// Previously a copy-paste of ErrInvalidJoin's message ("a container
	// has already joined the endpoint"), which states the opposite of
	// this condition.
	return "no container attached to the endpoint"
}

// Maskable denotes the type of this error
func (nc ErrNoContainer) Maskable() {}
+
+// ErrInvalidID is returned when a query-by-id method is being invoked
+// with an empty id parameter
+type ErrInvalidID string
+
+func (ii ErrInvalidID) Error() string {
+ return fmt.Sprintf("invalid id: %s", string(ii))
+}
+
+// BadRequest denotes the type of this error
+func (ii ErrInvalidID) BadRequest() {}
+
+// ErrInvalidName is returned when a query-by-name or resource create method is
+// invoked with an empty name parameter
+type ErrInvalidName string
+
+func (in ErrInvalidName) Error() string {
+ return fmt.Sprintf("invalid name: %s", string(in))
+}
+
+// BadRequest denotes the type of this error
+func (in ErrInvalidName) BadRequest() {}
+
+// NetworkTypeError type is returned when the network type string is not
+// known to libnetwork.
+type NetworkTypeError string
+
+func (nt NetworkTypeError) Error() string {
+ return fmt.Sprintf("unknown driver %q", string(nt))
+}
+
+// NotFound denotes the type of this error
+func (nt NetworkTypeError) NotFound() {}
+
+// NetworkNameError is returned when a network with the same name already exists.
+type NetworkNameError string
+
+func (nnr NetworkNameError) Error() string {
+ return fmt.Sprintf("network with name %s already exists", string(nnr))
+}
+
+// Forbidden denotes the type of this error
+func (nnr NetworkNameError) Forbidden() {}
+
+// UnknownNetworkError is returned when libnetwork could not find in it's database
+// a network with the same name and id.
+type UnknownNetworkError struct {
+ name string
+ id string
+}
+
+func (une *UnknownNetworkError) Error() string {
+ return fmt.Sprintf("unknown network %s id %s", une.name, une.id)
+}
+
+// NotFound denotes the type of this error
+func (une *UnknownNetworkError) NotFound() {}
+
+// ActiveEndpointsError is returned when a network is deleted which has active
+// endpoints in it.
+type ActiveEndpointsError struct {
+ name string
+ id string
+}
+
+func (aee *ActiveEndpointsError) Error() string {
+ return fmt.Sprintf("network with name %s id %s has active endpoints", aee.name, aee.id)
+}
+
+// Forbidden denotes the type of this error
+func (aee *ActiveEndpointsError) Forbidden() {}
+
+// UnknownEndpointError is returned when libnetwork could not find in it's database
+// an endpoint with the same name and id.
+type UnknownEndpointError struct {
+ name string
+ id string
+}
+
+func (uee *UnknownEndpointError) Error() string {
+ return fmt.Sprintf("unknown endpoint %s id %s", uee.name, uee.id)
+}
+
+// NotFound denotes the type of this error
+func (uee *UnknownEndpointError) NotFound() {}
+
+// ActiveContainerError is returned when an endpoint is deleted which has active
+// containers attached to it.
+type ActiveContainerError struct {
+ name string
+ id string
+}
+
+func (ace *ActiveContainerError) Error() string {
+ return fmt.Sprintf("endpoint with name %s id %s has active containers", ace.name, ace.id)
+}
+
+// Forbidden denotes the type of this error
+func (ace *ActiveContainerError) Forbidden() {}
+
+// InvalidContainerIDError is returned when an invalid container id is passed
+// in Join/Leave
+type InvalidContainerIDError string
+
+func (id InvalidContainerIDError) Error() string {
+ return fmt.Sprintf("invalid container id %s", string(id))
+}
+
+// BadRequest denotes the type of this error
+func (id InvalidContainerIDError) BadRequest() {}
diff --git a/vendor/src/github.com/docker/libnetwork/errors_test.go b/vendor/src/github.com/docker/libnetwork/errors_test.go
new file mode 100644
index 0000000000..29bf668689
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/errors_test.go
@@ -0,0 +1,51 @@
+package libnetwork
+
+import (
+ "testing"
+
+ "github.com/docker/libnetwork/types"
+)
+
+func TestErrorInterfaces(t *testing.T) {
+
+ badRequestErrorList := []error{ErrInvalidID(""), ErrInvalidName(""), ErrInvalidJoin{}, ErrInvalidNetworkDriver(""), InvalidContainerIDError(""), ErrNoSuchNetwork(""), ErrNoSuchEndpoint("")}
+ for _, err := range badRequestErrorList {
+ switch u := err.(type) {
+ case types.BadRequestError:
+ return
+ default:
+ t.Fatalf("Failed to detect err %v is of type BadRequestError. Got type: %T", err, u)
+ }
+ }
+
+ maskableErrorList := []error{ErrNoContainer{}}
+ for _, err := range maskableErrorList {
+ switch u := err.(type) {
+ case types.MaskableError:
+ return
+ default:
+ t.Fatalf("Failed to detect err %v is of type MaskableError. Got type: %T", err, u)
+ }
+ }
+
+ notFoundErrorList := []error{NetworkTypeError(""), &UnknownNetworkError{}, &UnknownEndpointError{}}
+ for _, err := range notFoundErrorList {
+ switch u := err.(type) {
+ case types.NotFoundError:
+ return
+ default:
+ t.Fatalf("Failed to detect err %v is of type NotFoundError. Got type: %T", err, u)
+ }
+ }
+
+ forbiddenErrorList := []error{NetworkTypeError(""), &UnknownNetworkError{}, &UnknownEndpointError{}}
+ for _, err := range forbiddenErrorList {
+ switch u := err.(type) {
+ case types.ForbiddenError:
+ return
+ default:
+ t.Fatalf("Failed to detect err %v is of type ForbiddenError. Got type: %T", err, u)
+ }
+ }
+
+}
diff --git a/pkg/etchosts/etchosts.go b/vendor/src/github.com/docker/libnetwork/etchosts/etchosts.go
similarity index 94%
rename from pkg/etchosts/etchosts.go
rename to vendor/src/github.com/docker/libnetwork/etchosts/etchosts.go
index bef4a480cb..88e6b63e70 100644
--- a/pkg/etchosts/etchosts.go
+++ b/vendor/src/github.com/docker/libnetwork/etchosts/etchosts.go
@@ -8,13 +8,13 @@ import (
"regexp"
)
-// Structure for a single host record
+// Record Structure for a single host record
type Record struct {
Hosts string
IP string
}
-// Writes record to file and returns bytes written or error
+// WriteTo writes record to file and returns bytes written or error
func (r Record) WriteTo(w io.Writer) (int64, error) {
n, err := fmt.Fprintf(w, "%s\t%s\n", r.IP, r.Hosts)
return int64(n), err
diff --git a/pkg/etchosts/etchosts_test.go b/vendor/src/github.com/docker/libnetwork/etchosts/etchosts_test.go
similarity index 98%
rename from pkg/etchosts/etchosts_test.go
rename to vendor/src/github.com/docker/libnetwork/etchosts/etchosts_test.go
index c033904c31..8c8b87c016 100644
--- a/pkg/etchosts/etchosts_test.go
+++ b/vendor/src/github.com/docker/libnetwork/etchosts/etchosts_test.go
@@ -5,6 +5,8 @@ import (
"io/ioutil"
"os"
"testing"
+
+ _ "github.com/docker/libnetwork/netutils"
)
func TestBuildDefault(t *testing.T) {
diff --git a/daemon/networkdriver/ipallocator/allocator.go b/vendor/src/github.com/docker/libnetwork/ipallocator/allocator.go
similarity index 79%
rename from daemon/networkdriver/ipallocator/allocator.go
rename to vendor/src/github.com/docker/libnetwork/ipallocator/allocator.go
index 554dbdd5b1..1560099937 100644
--- a/daemon/networkdriver/ipallocator/allocator.go
+++ b/vendor/src/github.com/docker/libnetwork/ipallocator/allocator.go
@@ -1,3 +1,5 @@
+// Package ipallocator defines the default IP allocator. It will move out of libnetwork as an external IPAM plugin.
+// This has been imported unchanged from Docker, besides additon of registration logic
package ipallocator
import (
@@ -7,7 +9,7 @@ import (
"sync"
"github.com/Sirupsen/logrus"
- "github.com/docker/docker/daemon/networkdriver"
+ "github.com/docker/libnetwork/netutils"
)
// allocatedMap is thread-unsafe set of allocated IP
@@ -19,7 +21,7 @@ type allocatedMap struct {
}
func newAllocatedMap(network *net.IPNet) *allocatedMap {
- firstIP, lastIP := networkdriver.NetworkRange(network)
+ firstIP, lastIP := netutils.NetworkRange(network)
begin := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1))
end := big.NewInt(0).Sub(ipToBigInt(lastIP), big.NewInt(1))
@@ -34,18 +36,25 @@ func newAllocatedMap(network *net.IPNet) *allocatedMap {
type networkSet map[string]*allocatedMap
var (
- ErrNoAvailableIPs = errors.New("no available ip addresses on network")
- ErrIPAlreadyAllocated = errors.New("ip already allocated")
- ErrIPOutOfRange = errors.New("requested ip is out of range")
+ // ErrNoAvailableIPs preformatted error
+ ErrNoAvailableIPs = errors.New("no available ip addresses on network")
+ // ErrIPAlreadyAllocated preformatted error
+ ErrIPAlreadyAllocated = errors.New("ip already allocated")
+ // ErrIPOutOfRange preformatted error
+ ErrIPOutOfRange = errors.New("requested ip is out of range")
+ // ErrNetworkAlreadyRegistered preformatted error
ErrNetworkAlreadyRegistered = errors.New("network already registered")
- ErrBadSubnet = errors.New("network does not contain specified subnet")
+ // ErrBadSubnet preformatted error
+ ErrBadSubnet = errors.New("network does not contain specified subnet")
)
+// IPAllocator manages the ipam
type IPAllocator struct {
allocatedIPs networkSet
mutex sync.Mutex
}
+// New returns a new instance of IPAllocator
func New() *IPAllocator {
return &IPAllocator{networkSet{}, sync.Mutex{}}
}
@@ -61,18 +70,14 @@ func (a *IPAllocator) RegisterSubnet(network *net.IPNet, subnet *net.IPNet) erro
if _, ok := a.allocatedIPs[key]; ok {
return ErrNetworkAlreadyRegistered
}
- n := newAllocatedMap(network)
- beginIP, endIP := networkdriver.NetworkRange(subnet)
- begin := big.NewInt(0).Add(ipToBigInt(beginIP), big.NewInt(1))
- end := big.NewInt(0).Sub(ipToBigInt(endIP), big.NewInt(1))
// Check that subnet is within network
- if !(begin.Cmp(n.begin) >= 0 && end.Cmp(n.end) <= 0 && begin.Cmp(end) == -1) {
+ beginIP, endIP := netutils.NetworkRange(subnet)
+ if !(network.Contains(beginIP) && network.Contains(endIP)) {
return ErrBadSubnet
}
- n.begin.Set(begin)
- n.end.Set(end)
- n.last.Sub(begin, big.NewInt(1))
+
+ n := newAllocatedMap(subnet)
a.allocatedIPs[key] = n
return nil
}
diff --git a/daemon/networkdriver/ipallocator/allocator_test.go b/vendor/src/github.com/docker/libnetwork/ipallocator/allocator_test.go
similarity index 99%
rename from daemon/networkdriver/ipallocator/allocator_test.go
rename to vendor/src/github.com/docker/libnetwork/ipallocator/allocator_test.go
index 6c5c0e4dbc..fffe6e3389 100644
--- a/daemon/networkdriver/ipallocator/allocator_test.go
+++ b/vendor/src/github.com/docker/libnetwork/ipallocator/allocator_test.go
@@ -601,7 +601,7 @@ func TestRegisterBadTwice(t *testing.T) {
Mask: []byte{255, 255, 255, 248},
}
if err := a.RegisterSubnet(network, subnet); err != ErrNetworkAlreadyRegistered {
- t.Fatalf("Expected ErrNetworkAlreadyRegistered error, got %v", err)
+ t.Fatalf("Expecteded ErrNetworkAlreadyRegistered error, got %v", err)
}
}
diff --git a/pkg/iptables/firewalld.go b/vendor/src/github.com/docker/libnetwork/iptables/firewalld.go
similarity index 71%
rename from pkg/iptables/firewalld.go
rename to vendor/src/github.com/docker/libnetwork/iptables/firewalld.go
index 3087794131..1227647b74 100644
--- a/pkg/iptables/firewalld.go
+++ b/vendor/src/github.com/docker/libnetwork/iptables/firewalld.go
@@ -2,17 +2,22 @@ package iptables
import (
"fmt"
+ "strings"
+
"github.com/Sirupsen/logrus"
"github.com/godbus/dbus"
- "strings"
)
+// IPV defines the table string
type IPV string
const (
- Iptables IPV = "ipv4"
- Ip6tables IPV = "ipv6"
- Ebtables IPV = "eb"
+ // Iptables point ipv4 table
+ Iptables IPV = "ipv4"
+ // IP6Tables point to ipv6 table
+ IP6Tables IPV = "ipv6"
+ // Ebtables point to bridge table
+ Ebtables IPV = "eb"
)
const (
dbusInterface = "org.fedoraproject.FirewallD1"
@@ -32,16 +37,19 @@ var (
onReloaded []*func() // callbacks when Firewalld has been reloaded
)
-func FirewalldInit() {
+// FirewalldInit initializes firewalld management code.
+func FirewalldInit() error {
var err error
- connection, err = newConnection()
-
- if err != nil {
- logrus.Errorf("Failed to connect to D-Bus system bus: %s", err)
+ if connection, err = newConnection(); err != nil {
+ return fmt.Errorf("Failed to connect to D-Bus system bus: %v", err)
+ }
+ if connection != nil {
+ go signalHandler()
}
firewalldRunning = checkRunning()
+ return nil
}
// New() establishes a connection to the system bus.
@@ -76,36 +84,33 @@ func (c *Conn) initConnection() error {
c.signal = make(chan *dbus.Signal, 10)
c.sysconn.Signal(c.signal)
- go signalHandler()
return nil
}
func signalHandler() {
- if connection != nil {
- for signal := range connection.signal {
- if strings.Contains(signal.Name, "NameOwnerChanged") {
- firewalldRunning = checkRunning()
- dbusConnectionChanged(signal.Body)
- } else if strings.Contains(signal.Name, "Reloaded") {
- reloaded()
- }
+ for signal := range connection.signal {
+ if strings.Contains(signal.Name, "NameOwnerChanged") {
+ firewalldRunning = checkRunning()
+ dbusConnectionChanged(signal.Body)
+ } else if strings.Contains(signal.Name, "Reloaded") {
+ reloaded()
}
}
}
func dbusConnectionChanged(args []interface{}) {
name := args[0].(string)
- old_owner := args[1].(string)
- new_owner := args[2].(string)
+ oldOwner := args[1].(string)
+ newOwner := args[2].(string)
if name != dbusInterface {
return
}
- if len(new_owner) > 0 {
+ if len(newOwner) > 0 {
connectionEstablished()
- } else if len(old_owner) > 0 {
+ } else if len(oldOwner) > 0 {
connectionLost()
}
}
@@ -125,7 +130,7 @@ func reloaded() {
}
}
-// add callback
+// OnReloaded add callback
func OnReloaded(callback func()) {
for _, pf := range onReloaded {
if pf == &callback {
@@ -145,19 +150,15 @@ func checkRunning() bool {
logrus.Infof("Firewalld running: %t", err == nil)
return err == nil
}
- logrus.Info("Firewalld not running")
return false
}
-// Firewalld's passthrough method simply passes args through to iptables/ip6tables
+// Passthrough method simply passes args through to iptables/ip6tables
func Passthrough(ipv IPV, args ...string) ([]byte, error) {
var output string
-
logrus.Debugf("Firewalld passthrough: %s, %s", ipv, args)
- err := connection.sysobj.Call(dbusInterface+".direct.passthrough", 0, ipv, args).Store(&output)
- if output != "" {
- logrus.Debugf("passthrough output: %s", output)
+ if err := connection.sysobj.Call(dbusInterface+".direct.passthrough", 0, ipv, args).Store(&output); err != nil {
+ return nil, err
}
-
- return []byte(output), err
+ return []byte(output), nil
}
diff --git a/pkg/iptables/firewalld_test.go b/vendor/src/github.com/docker/libnetwork/iptables/firewalld_test.go
similarity index 88%
rename from pkg/iptables/firewalld_test.go
rename to vendor/src/github.com/docker/libnetwork/iptables/firewalld_test.go
index 3896007d64..547ba7e683 100644
--- a/pkg/iptables/firewalld_test.go
+++ b/vendor/src/github.com/docker/libnetwork/iptables/firewalld_test.go
@@ -7,14 +7,19 @@ import (
)
func TestFirewalldInit(t *testing.T) {
- FirewalldInit()
+ if !checkRunning() {
+ t.Skip("firewalld is not running")
+ }
+ if err := FirewalldInit(); err != nil {
+ t.Fatal(err)
+ }
}
func TestReloaded(t *testing.T) {
var err error
var fwdChain *Chain
- fwdChain, err = NewChain("FWD", "lo", Filter)
+ fwdChain, err = NewChain("FWD", "lo", Filter, false)
if err != nil {
t.Fatal(err)
}
diff --git a/pkg/iptables/iptables.go b/vendor/src/github.com/docker/libnetwork/iptables/iptables.go
similarity index 80%
rename from pkg/iptables/iptables.go
rename to vendor/src/github.com/docker/libnetwork/iptables/iptables.go
index 0cfcca7502..4299a7e2b6 100644
--- a/pkg/iptables/iptables.go
+++ b/vendor/src/github.com/docker/libnetwork/iptables/iptables.go
@@ -5,37 +5,51 @@ import (
"fmt"
"net"
"os/exec"
- "regexp"
"strconv"
"strings"
+ "sync"
"github.com/Sirupsen/logrus"
)
+// Action signifies the iptable action.
type Action string
+
+// Table refers to Nat, Filter or Mangle.
type Table string
const (
+ // Append appends the rule at the end of the chain.
Append Action = "-A"
+ // Delete deletes the rule from the chain.
Delete Action = "-D"
+ // Insert inserts the rule at the top of the chain.
Insert Action = "-I"
- Nat Table = "nat"
- Filter Table = "filter"
- Mangle Table = "mangle"
+ // Nat table is used for nat translation rules.
+ Nat Table = "nat"
+ // Filter table is used for filter rules.
+ Filter Table = "filter"
+ // Mangle table is used for mangling the packet.
+ Mangle Table = "mangle"
)
var (
- iptablesPath string
- supportsXlock = false
+ iptablesPath string
+ supportsXlock = false
+ // used to lock iptables commands if xtables lock is not supported
+ bestEffortLock sync.Mutex
+ // ErrIptablesNotFound is returned when the rule is not found.
ErrIptablesNotFound = errors.New("Iptables not found")
)
+// Chain defines the iptables chain.
type Chain struct {
Name string
Bridge string
Table Table
}
+// ChainError is returned to represent errors during ip table operation.
type ChainError struct {
Chain string
Output []byte
@@ -58,7 +72,8 @@ func initCheck() error {
return nil
}
-func NewChain(name, bridge string, table Table) (*Chain, error) {
+// NewChain adds a new chain to ip table.
+func NewChain(name, bridge string, table Table, hairpinMode bool) (*Chain, error) {
c := &Chain{
Name: name,
Bridge: bridge,
@@ -90,8 +105,10 @@ func NewChain(name, bridge string, table Table) (*Chain, error) {
}
output := []string{
"-m", "addrtype",
- "--dst-type", "LOCAL",
- "!", "--dst", "127.0.0.0/8"}
+ "--dst-type", "LOCAL"}
+ if !hairpinMode {
+ output = append(output, "!", "--dst", "127.0.0.0/8")
+ }
if !Exists(Nat, "OUTPUT", output...) {
if err := c.Output(Append, output...); err != nil {
return nil, fmt.Errorf("Failed to inject docker in OUTPUT chain: %s", err)
@@ -113,6 +130,7 @@ func NewChain(name, bridge string, table Table) (*Chain, error) {
return c, nil
}
+// RemoveExistingChain removes existing chain from the table.
func RemoveExistingChain(name string, table Table) error {
c := &Chain{
Name: name,
@@ -124,7 +142,7 @@ func RemoveExistingChain(name string, table Table) error {
return c.Remove()
}
-// Add forwarding rule to 'filter' table and corresponding nat rule to 'nat' table
+// Forward adds forwarding rule to 'filter' table and corresponding nat rule to 'nat' table.
func (c *Chain) Forward(action Action, ip net.IP, port int, proto, destAddr string, destPort int) error {
daddr := ip.String()
if ip.IsUnspecified() {
@@ -137,7 +155,6 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, destAddr stri
"-p", proto,
"-d", daddr,
"--dport", strconv.Itoa(port),
- "!", "-i", c.Bridge,
"-j", "DNAT",
"--to-destination", net.JoinHostPort(destAddr, strconv.Itoa(destPort))); err != nil {
return err
@@ -171,7 +188,7 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, destAddr stri
return nil
}
-// Add reciprocal ACCEPT rule for two supplied IP addresses.
+// Link adds reciprocal ACCEPT rule for two supplied IP addresses.
// Traffic is allowed from ip1 to ip2 and vice-versa
func (c *Chain) Link(action Action, ip1, ip2 net.IP, port int, proto string) error {
if output, err := Raw("-t", string(Filter), string(action), c.Name,
@@ -199,7 +216,7 @@ func (c *Chain) Link(action Action, ip1, ip2 net.IP, port int, proto string) err
return nil
}
-// Add linking rule to nat/PREROUTING chain.
+// Prerouting adds linking rule to nat/PREROUTING chain.
func (c *Chain) Prerouting(action Action, args ...string) error {
a := []string{"-t", string(Nat), string(action), "PREROUTING"}
if len(args) > 0 {
@@ -213,7 +230,7 @@ func (c *Chain) Prerouting(action Action, args ...string) error {
return nil
}
-// Add linking rule to an OUTPUT chain
+// Output adds linking rule to an OUTPUT chain.
func (c *Chain) Output(action Action, args ...string) error {
a := []string{"-t", string(c.Table), string(action), "OUTPUT"}
if len(args) > 0 {
@@ -227,6 +244,7 @@ func (c *Chain) Output(action Action, args ...string) error {
return nil
}
+// Remove removes the chain.
func (c *Chain) Remove() error {
// Ignore errors - This could mean the chains were never set up
if c.Table == Nat {
@@ -242,7 +260,7 @@ func (c *Chain) Remove() error {
return nil
}
-// Check if a rule exists
+// Exists checks if a rule exists
func Exists(table Table, chain string, rule ...string) bool {
if string(table) == "" {
table = Filter
@@ -261,19 +279,12 @@ func Exists(table Table, chain string, rule ...string) bool {
// parse "iptables -S" for the rule (this checks rules in a specific chain
// in a specific table)
ruleString := strings.Join(rule, " ")
- existingRules, _ := exec.Command("iptables", "-t", string(table), "-S", chain).Output()
+ existingRules, _ := exec.Command(iptablesPath, "-t", string(table), "-S", chain).Output()
- // regex to replace ips in rule
- // because MASQUERADE rule will not be exactly what was passed
- re := regexp.MustCompile(`[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\/[0-9]{1,2}`)
-
- return strings.Contains(
- re.ReplaceAllString(string(existingRules), "?"),
- re.ReplaceAllString(ruleString, "?"),
- )
+ return strings.Contains(string(existingRules), ruleString)
}
-// Call 'iptables' system command, passing supplied arguments
+// Raw calls 'iptables' system command, passing supplied arguments.
func Raw(args ...string) ([]byte, error) {
if firewalldRunning {
output, err := Passthrough(Iptables, args...)
@@ -288,6 +299,9 @@ func Raw(args ...string) ([]byte, error) {
}
if supportsXlock {
args = append([]string{"--wait"}, args...)
+ } else {
+ bestEffortLock.Lock()
+ defer bestEffortLock.Unlock()
}
logrus.Debugf("%s, %v", iptablesPath, args)
diff --git a/pkg/iptables/iptables_test.go b/vendor/src/github.com/docker/libnetwork/iptables/iptables_test.go
similarity index 77%
rename from pkg/iptables/iptables_test.go
rename to vendor/src/github.com/docker/libnetwork/iptables/iptables_test.go
index ced4262ce2..afb3587f1f 100644
--- a/pkg/iptables/iptables_test.go
+++ b/vendor/src/github.com/docker/libnetwork/iptables/iptables_test.go
@@ -5,10 +5,13 @@ import (
"os/exec"
"strconv"
"strings"
+ "sync"
"testing"
+
+ _ "github.com/docker/libnetwork/netutils"
)
-const chainName = "DOCKERTEST"
+const chainName = "DOCKEREST"
var natChain *Chain
var filterChain *Chain
@@ -16,12 +19,12 @@ var filterChain *Chain
func TestNewChain(t *testing.T) {
var err error
- natChain, err = NewChain(chainName, "lo", Nat)
+ natChain, err = NewChain(chainName, "lo", Nat, false)
if err != nil {
t.Fatal(err)
}
- filterChain, err = NewChain(chainName, "lo", Filter)
+ filterChain, err = NewChain(chainName, "lo", Filter, false)
if err != nil {
t.Fatal(err)
}
@@ -40,7 +43,6 @@ func TestForward(t *testing.T) {
}
dnatRule := []string{
- "!", "-i", filterChain.Bridge,
"-d", ip.String(),
"-p", proto,
"--dport", strconv.Itoa(port),
@@ -169,6 +171,45 @@ func TestOutput(t *testing.T) {
}
}
+func TestConcurrencyWithWait(t *testing.T) {
+ RunConcurrencyTest(t, true)
+}
+
+func TestConcurrencyNoWait(t *testing.T) {
+ RunConcurrencyTest(t, false)
+}
+
+// Runs 10 concurrent rule additions. This will fail if iptables
+// is actually invoked simultaneously without --wait.
+// Note that if iptables does not support the xtable lock on this
+// system, then allowXlock has no effect -- it will always be off.
+func RunConcurrencyTest(t *testing.T, allowXlock bool) {
+ var wg sync.WaitGroup
+
+ if !allowXlock && supportsXlock {
+ supportsXlock = false
+ defer func() { supportsXlock = true }()
+ }
+
+ ip := net.ParseIP("192.168.1.1")
+ port := 1234
+ dstAddr := "172.17.0.1"
+ dstPort := 4321
+ proto := "tcp"
+
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := natChain.Forward(Append, ip, port, proto, dstAddr, dstPort)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+ }
+ wg.Wait()
+}
+
func TestCleanup(t *testing.T) {
var err error
var rules []byte
diff --git a/vendor/src/github.com/docker/libnetwork/libnetwork_internal_test.go b/vendor/src/github.com/docker/libnetwork/libnetwork_internal_test.go
new file mode 100644
index 0000000000..6a9a7fdc43
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/libnetwork_internal_test.go
@@ -0,0 +1,26 @@
+package libnetwork
+
+import (
+ "testing"
+
+ "github.com/docker/libnetwork/driverapi"
+)
+
+func TestDriverRegistration(t *testing.T) {
+ bridgeNetType := "bridge"
+ c, err := New()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = c.(*controller).RegisterDriver(bridgeNetType, nil)
+ if err == nil {
+ t.Fatalf("Expecting the RegisterDriver to fail for %s", bridgeNetType)
+ }
+ if _, ok := err.(driverapi.ErrActiveRegistration); !ok {
+ t.Fatalf("Failed for unexpected reason: %v", err)
+ }
+ err = c.(*controller).RegisterDriver("test-dummy", nil)
+ if err != nil {
+ t.Fatalf("Test failed with an error %v", err)
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/libnetwork_test.go b/vendor/src/github.com/docker/libnetwork/libnetwork_test.go
new file mode 100644
index 0000000000..981128cdc6
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/libnetwork_test.go
@@ -0,0 +1,1528 @@
+package libnetwork_test
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "runtime"
+ "strconv"
+ "sync"
+ "testing"
+
+ log "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/plugins"
+ "github.com/docker/docker/pkg/reexec"
+ "github.com/docker/libnetwork"
+ "github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/netlabel"
+ "github.com/docker/libnetwork/netutils"
+ "github.com/docker/libnetwork/options"
+ "github.com/docker/libnetwork/types"
+ "github.com/vishvananda/netlink"
+ "github.com/vishvananda/netns"
+)
+
+const (
+ bridgeNetType = "bridge"
+ bridgeName = "docker0"
+)
+
+func TestMain(m *testing.M) {
+ if reexec.Init() {
+ return
+ }
+ os.Exit(m.Run())
+}
+
+func createTestNetwork(networkType, networkName string, option options.Generic, netOption options.Generic) (libnetwork.Network, error) {
+ controller, err := libnetwork.New()
+ if err != nil {
+ return nil, err
+ }
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = option
+
+ err = controller.ConfigureNetworkDriver(networkType, genericOption)
+ if err != nil {
+ return nil, err
+ }
+
+ network, err := controller.NewNetwork(networkType, networkName,
+ libnetwork.NetworkOptionGeneric(netOption))
+ if err != nil {
+ return nil, err
+ }
+
+ return network, nil
+}
+
+func getEmptyGenericOption() map[string]interface{} {
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = options.Generic{}
+ return genericOption
+}
+
+func getPortMapping() []types.PortBinding {
+ return []types.PortBinding{
+ types.PortBinding{Proto: types.TCP, Port: uint16(230), HostPort: uint16(23000)},
+ types.PortBinding{Proto: types.UDP, Port: uint16(200), HostPort: uint16(22000)},
+ types.PortBinding{Proto: types.TCP, Port: uint16(120), HostPort: uint16(12000)},
+ }
+}
+
+func TestNull(t *testing.T) {
+ network, err := createTestNetwork("null", "testnetwork", options.Generic{},
+ options.Generic{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ep, err := network.CreateEndpoint("testep")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = ep.Join("null_container",
+ libnetwork.JoinOptionHostname("test"),
+ libnetwork.JoinOptionDomainname("docker.io"),
+ libnetwork.JoinOptionExtraHost("web", "192.168.0.1"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ep.Leave("null_container")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := ep.Delete(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := network.Delete(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestHost(t *testing.T) {
+ network, err := createTestNetwork("host", "testnetwork", options.Generic{}, options.Generic{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ep1, err := network.CreateEndpoint("testep1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = ep1.Join("host_container1",
+ libnetwork.JoinOptionHostname("test1"),
+ libnetwork.JoinOptionDomainname("docker.io"),
+ libnetwork.JoinOptionExtraHost("web", "192.168.0.1"),
+ libnetwork.JoinOptionUseDefaultSandbox())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ep2, err := network.CreateEndpoint("testep2")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = ep2.Join("host_container2",
+ libnetwork.JoinOptionHostname("test2"),
+ libnetwork.JoinOptionDomainname("docker.io"),
+ libnetwork.JoinOptionExtraHost("web", "192.168.0.1"),
+ libnetwork.JoinOptionUseDefaultSandbox())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ep1.Leave("host_container1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ep2.Leave("host_container2")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := ep1.Delete(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := ep2.Delete(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Try to create another host endpoint and join/leave that.
+ ep3, err := network.CreateEndpoint("testep3")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = ep3.Join("host_container3",
+ libnetwork.JoinOptionHostname("test3"),
+ libnetwork.JoinOptionDomainname("docker.io"),
+ libnetwork.JoinOptionExtraHost("web", "192.168.0.1"),
+ libnetwork.JoinOptionUseDefaultSandbox())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ep3.Leave("host_container3")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := ep3.Delete(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := network.Delete(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBridge(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ ip, subnet, err := net.ParseCIDR("192.168.100.1/24")
+ if err != nil {
+ t.Fatal(err)
+ }
+ subnet.IP = ip
+
+ ip, cidr, err := net.ParseCIDR("192.168.100.2/28")
+ if err != nil {
+ t.Fatal(err)
+ }
+ cidr.IP = ip
+
+ ip, cidrv6, err := net.ParseCIDR("fe90::1/96")
+ if err != nil {
+ t.Fatal(err)
+ }
+ cidrv6.IP = ip
+
+ log.Debug("Adding a bridge")
+ option := options.Generic{
+ "EnableIPForwarding": true,
+ }
+
+ netOption := options.Generic{
+ "BridgeName": bridgeName,
+ "AddressIPv4": subnet,
+ "FixedCIDR": cidr,
+ "FixedCIDRv6": cidrv6,
+ "EnableIPv6": true,
+ "EnableIPTables": true,
+ "EnableIPMasquerade": true,
+ "EnableICC": true,
+ "AllowNonDefaultBridge": true}
+
+ network, err := createTestNetwork(bridgeNetType, "testnetwork", option, netOption)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ep, err := network.CreateEndpoint("testep", libnetwork.CreateOptionPortMapping(getPortMapping()))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ epInfo, err := ep.DriverInfo()
+ if err != nil {
+ t.Fatal(err)
+ }
+ pmd, ok := epInfo[netlabel.PortMap]
+ if !ok {
+ t.Fatalf("Could not find expected info in endpoint data")
+ }
+ pm, ok := pmd.([]types.PortBinding)
+ if !ok {
+ t.Fatalf("Unexpected format for port mapping in endpoint operational data")
+ }
+ if len(pm) != 3 {
+ t.Fatalf("Incomplete data for port mapping in endpoint operational data: %d", len(pm))
+ }
+
+ if err := ep.Delete(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := network.Delete(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestUnknownDriver(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ _, err := createTestNetwork("unknowndriver", "testnetwork", options.Generic{}, options.Generic{})
+ if err == nil {
+ t.Fatal("Expected to fail. But instead succeeded")
+ }
+
+ if _, ok := err.(libnetwork.NetworkTypeError); !ok {
+ t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+ }
+}
+
+func TestNilRemoteDriver(t *testing.T) {
+ controller, err := libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = controller.NewNetwork("framerelay", "dummy",
+ libnetwork.NetworkOptionGeneric(getEmptyGenericOption()))
+ if err == nil {
+ t.Fatal("Expected to fail. But instead succeeded")
+ }
+
+ if _, ok := err.(types.NotFoundError); !ok {
+ t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+ }
+}
+
+func TestDuplicateNetwork(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ controller, err := libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ genericOption := make(map[string]interface{})
+ genericOption[netlabel.GenericData] = options.Generic{}
+
+ err = controller.ConfigureNetworkDriver(bridgeNetType, genericOption)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = controller.NewNetwork(bridgeNetType, "testnetwork",
+ libnetwork.NetworkOptionGeneric(genericOption))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = controller.NewNetwork(bridgeNetType, "testnetwork")
+ if err == nil {
+ t.Fatal("Expected to fail. But instead succeeded")
+ }
+
+ if _, ok := err.(libnetwork.NetworkNameError); !ok {
+ t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+ }
+}
+
+func TestNetworkName(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ _, err := createTestNetwork(bridgeNetType, "", options.Generic{}, options.Generic{})
+ if err == nil {
+ t.Fatal("Expected to fail. But instead succeeded")
+ }
+
+ if _, ok := err.(libnetwork.ErrInvalidName); !ok {
+ t.Fatalf("Expected to fail with ErrInvalidName error. Got %v", err)
+ }
+
+ networkName := "testnetwork"
+ n, err := createTestNetwork(bridgeNetType, networkName, options.Generic{}, options.Generic{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n.Name() != networkName {
+ t.Fatalf("Expected network name %s, got %s", networkName, n.Name())
+ }
+}
+
+func TestNetworkType(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, options.Generic{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n.Type() != bridgeNetType {
+ t.Fatalf("Expected network type %s, got %s", bridgeNetType, n.Type())
+ }
+}
+
+func TestNetworkID(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, options.Generic{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n.ID() == "" {
+ t.Fatal("Expected non-empty network id")
+ }
+}
+
+func TestDeleteNetworkWithActiveEndpoints(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ option := options.Generic{
+ "BridgeName": bridgeName,
+ "AllowNonDefaultBridge": true}
+
+ network, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, option)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ep, err := network.CreateEndpoint("testep")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = network.Delete()
+ if err == nil {
+ t.Fatal("Expected to fail. But instead succeeded")
+ }
+
+ if _, ok := err.(*libnetwork.ActiveEndpointsError); !ok {
+ t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+ }
+
+ // Done testing. Now cleanup.
+ if err := ep.Delete(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := network.Delete(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestUnknownNetwork(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ option := options.Generic{
+ "BridgeName": bridgeName,
+ "AllowNonDefaultBridge": true}
+
+ network, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, option)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = network.Delete()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = network.Delete()
+ if err == nil {
+ t.Fatal("Expected to fail. But instead succeeded")
+ }
+
+ if _, ok := err.(*libnetwork.UnknownNetworkError); !ok {
+ t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+ }
+}
+
+func TestUnknownEndpoint(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ ip, subnet, err := net.ParseCIDR("192.168.100.1/24")
+ if err != nil {
+ t.Fatal(err)
+ }
+ subnet.IP = ip
+
+ option := options.Generic{
+ "BridgeName": bridgeName,
+ "AddressIPv4": subnet,
+ "AllowNonDefaultBridge": true}
+
+ network, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, option)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = network.CreateEndpoint("")
+ if err == nil {
+ t.Fatal("Expected to fail. But instead succeeded")
+ }
+ if _, ok := err.(libnetwork.ErrInvalidName); !ok {
+ t.Fatalf("Expected to fail with ErrInvalidName error. Actual error: %v", err)
+ }
+
+ ep, err := network.CreateEndpoint("testep")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ep.Delete()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ep.Delete()
+ if err == nil {
+ t.Fatal("Expected to fail. But instead succeeded")
+ }
+
+ if _, ok := err.(*libnetwork.UnknownEndpointError); !ok {
+ t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+ }
+
+ // Done testing. Now cleanup
+ if err := network.Delete(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestNetworkEndpointsWalkers(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ controller, err := libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = controller.ConfigureNetworkDriver(bridgeNetType, getEmptyGenericOption())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create network 1 and add 2 endpoint: ep11, ep12
+ net1, err := controller.NewNetwork(bridgeNetType, "network1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ ep11, err := net1.CreateEndpoint("ep11")
+ if err != nil {
+ t.Fatal(err)
+ }
+ ep12, err := net1.CreateEndpoint("ep12")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Test list methods on net1
+ epList1 := net1.Endpoints()
+ if len(epList1) != 2 {
+ t.Fatalf("Endpoints() returned wrong number of elements: %d instead of 2", len(epList1))
+ }
+ // endpoint order is not guaranteed
+ for _, e := range epList1 {
+ if e != ep11 && e != ep12 {
+ t.Fatal("Endpoints() did not return all the expected elements")
+ }
+ }
+
+ // Test Endpoint Walk method
+ var epName string
+ var epWanted libnetwork.Endpoint
+ wlk := func(ep libnetwork.Endpoint) bool {
+ if ep.Name() == epName {
+ epWanted = ep
+ return true
+ }
+ return false
+ }
+
+ // Look for ep1 on network1
+ epName = "ep11"
+ net1.WalkEndpoints(wlk)
+ if epWanted == nil {
+ t.Fatal(err)
+ }
+ if ep11 != epWanted {
+ t.Fatal(err)
+ }
+
+ // Test Network Walk method
+ var netName string
+ var netWanted libnetwork.Network
+ nwWlk := func(nw libnetwork.Network) bool {
+ if nw.Name() == netName {
+ netWanted = nw
+ return true
+ }
+ return false
+ }
+
+ // Look for network named "network1"
+ netName = "network1"
+ controller.WalkNetworks(nwWlk)
+ if netWanted == nil {
+ t.Fatal(err)
+ }
+ if net1 != netWanted {
+ t.Fatal(err)
+ }
+}
+
+func TestControllerQuery(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ controller, err := libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = controller.ConfigureNetworkDriver(bridgeNetType, getEmptyGenericOption())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create network 1
+ net1, err := controller.NewNetwork(bridgeNetType, "network1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = controller.NetworkByName("")
+ if err == nil {
+ t.Fatalf("NetworkByName() succeeded with invalid target name")
+ }
+ if _, ok := err.(libnetwork.ErrInvalidName); !ok {
+ t.Fatalf("Expected NetworkByName() to fail with ErrInvalidName error. Got: %v", err)
+ }
+
+ _, err = controller.NetworkByID("")
+ if err == nil {
+ t.Fatalf("NetworkByID() succeeded with invalid target id")
+ }
+ if _, ok := err.(libnetwork.ErrInvalidID); !ok {
+ t.Fatalf("NetworkByID() failed with unexpected error: %v", err)
+ }
+
+ g, err := controller.NetworkByID("network1")
+ if err == nil {
+ t.Fatalf("Unexpected success for NetworkByID(): %v", g)
+ }
+ if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
+ t.Fatalf("NetworkByID() failed with unexpected error: %v", err)
+ }
+
+ g, err = controller.NetworkByName("network1")
+ if err != nil {
+ t.Fatalf("Unexpected failure for NetworkByName(): %v", err)
+ }
+ if g == nil {
+ t.Fatalf("NetworkByName() did not find the network")
+ }
+
+ if g != net1 {
+ t.Fatalf("NetworkByName() returned the wrong network")
+ }
+
+ g, err = controller.NetworkByID(net1.ID())
+ if err != nil {
+ t.Fatalf("Unexpected failure for NetworkByID(): %v", err)
+ }
+ if net1 != g {
+ t.Fatalf("NetworkByID() returned unexpected element: %v", g)
+ }
+}
+
+func TestNetworkQuery(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ controller, err := libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = controller.ConfigureNetworkDriver(bridgeNetType, getEmptyGenericOption())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Create network 1 and add 2 endpoint: ep11, ep12
+ net1, err := controller.NewNetwork(bridgeNetType, "network1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ ep11, err := net1.CreateEndpoint("ep11")
+ if err != nil {
+ t.Fatal(err)
+ }
+ ep12, err := net1.CreateEndpoint("ep12")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ e, err := net1.EndpointByName("ep11")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if ep11 != e {
+ t.Fatalf("EndpointByName() returned %v instead of %v", e, ep11)
+ }
+
+ e, err = net1.EndpointByName("")
+ if err == nil {
+ t.Fatalf("EndpointByName() succeeded with invalid target name")
+ }
+ if _, ok := err.(libnetwork.ErrInvalidName); !ok {
+ t.Fatalf("Expected EndpointByName() to fail with ErrInvalidName error. Got: %v", err)
+ }
+
+ e, err = net1.EndpointByName("IamNotAnEndpoint")
+ if err == nil {
+ t.Fatalf("EndpointByName() succeeded with unknown target name")
+ }
+ if _, ok := err.(libnetwork.ErrNoSuchEndpoint); !ok {
+ t.Fatal(err)
+ }
+ if e != nil {
+ t.Fatalf("EndpointByName(): expected nil, got %v", e)
+ }
+
+ e, err = net1.EndpointByID(ep12.ID())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if ep12 != e {
+ t.Fatalf("EndpointByID() returned %v instead of %v", e, ep12)
+ }
+
+ e, err = net1.EndpointByID("")
+ if err == nil {
+ t.Fatalf("EndpointByID() succeeded with invalid target id")
+ }
+ if _, ok := err.(libnetwork.ErrInvalidID); !ok {
+ t.Fatalf("EndpointByID() failed with unexpected error: %v", err)
+ }
+}
+
+const containerID = "valid_container"
+
+func checkSandbox(t *testing.T, info libnetwork.EndpointInfo) {
+ origns, err := netns.Get()
+ if err != nil {
+ t.Fatalf("Could not get the current netns: %v", err)
+ }
+ defer origns.Close()
+
+ key := info.SandboxKey()
+ f, err := os.OpenFile(key, os.O_RDONLY, 0)
+ if err != nil {
+ t.Fatalf("Failed to open network namespace path %q: %v", key, err)
+ }
+ defer f.Close()
+
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ nsFD := f.Fd()
+ if err = netns.Set(netns.NsHandle(nsFD)); err != nil {
+ t.Fatalf("Setting to the namespace pointed to by the sandbox %s failed: %v", key, err)
+ }
+ defer netns.Set(origns)
+
+ _, err = netlink.LinkByName("eth0")
+ if err != nil {
+ t.Fatalf("Could not find the interface eth0 inside the sandbox: %v", err)
+ }
+}
+
+func TestEndpointJoin(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, options.Generic{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ep, err := n.CreateEndpoint("ep1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Validate if ep.Info() only gives me IP address info and not names and gateway during CreateEndpoint()
+ info := ep.Info()
+
+ for _, iface := range info.InterfaceList() {
+ if iface.Address().IP.To4() == nil {
+ t.Fatalf("Invalid IP address returned: %v", iface.Address())
+ }
+ }
+
+ if info.Gateway().To4() != nil {
+ t.Fatalf("Expected empty gateway for an empty endpoint. Instead found a gateway: %v", info.Gateway())
+ }
+
+ if info.SandboxKey() != "" {
+ t.Fatalf("Expected an empty sandbox key for an empty endpoint. Instead found a non-empty sandbox key: %s", info.SandboxKey())
+ }
+
+ _, err = ep.Join(containerID,
+ libnetwork.JoinOptionHostname("test"),
+ libnetwork.JoinOptionDomainname("docker.io"),
+ libnetwork.JoinOptionExtraHost("web", "192.168.0.1"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ defer func() {
+ err = ep.Leave(containerID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+	// Validate if ep.Info() only gives valid gateway and sandbox key after the container has joined.
+ info = ep.Info()
+ if info.Gateway().To4() == nil {
+ t.Fatalf("Expected a valid gateway for a joined endpoint. Instead found an invalid gateway: %v", info.Gateway())
+ }
+
+ if info.SandboxKey() == "" {
+ t.Fatalf("Expected an non-empty sandbox key for a joined endpoint. Instead found a empty sandbox key")
+ }
+
+ checkSandbox(t, info)
+}
+
+func TestEndpointJoinInvalidContainerId(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, options.Generic{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ep, err := n.CreateEndpoint("ep1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = ep.Join("")
+ if err == nil {
+ t.Fatal("Expected to fail join with empty container id string")
+ }
+
+ if _, ok := err.(libnetwork.InvalidContainerIDError); !ok {
+ t.Fatalf("Failed for unexpected reason: %v", err)
+ }
+}
+
+func TestEndpointDeleteWithActiveContainer(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, options.Generic{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ep, err := n.CreateEndpoint("ep1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = ep.Join(containerID,
+ libnetwork.JoinOptionHostname("test"),
+ libnetwork.JoinOptionDomainname("docker.io"),
+ libnetwork.JoinOptionExtraHost("web", "192.168.0.1"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ err = ep.Leave(containerID)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ep.Delete()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ err = ep.Delete()
+ if err == nil {
+ t.Fatal("Expected to fail. But instead succeeded")
+ }
+
+ if _, ok := err.(*libnetwork.ActiveContainerError); !ok {
+ t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+ }
+}
+
+func TestEndpointMultipleJoins(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, options.Generic{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ep, err := n.CreateEndpoint("ep1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = ep.Join(containerID,
+ libnetwork.JoinOptionHostname("test"),
+ libnetwork.JoinOptionDomainname("docker.io"),
+ libnetwork.JoinOptionExtraHost("web", "192.168.0.1"))
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ err = ep.Leave(containerID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ _, err = ep.Join("container2")
+ if err == nil {
+ t.Fatal("Expected to fail multiple joins for the same endpoint")
+ }
+
+ if _, ok := err.(libnetwork.ErrInvalidJoin); !ok {
+ t.Fatalf("Failed for unexpected reason: %v", err)
+ }
+}
+
+func TestEndpointInvalidLeave(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, options.Generic{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ep, err := n.CreateEndpoint("ep1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ep.Leave(containerID)
+ if err == nil {
+ t.Fatal("Expected to fail leave from an endpoint which has no active join")
+ }
+
+ if _, ok := err.(libnetwork.InvalidContainerIDError); !ok {
+ if _, ok := err.(libnetwork.ErrNoContainer); !ok {
+ t.Fatalf("Failed for unexpected reason: %v", err)
+ }
+ }
+
+ _, err = ep.Join(containerID,
+ libnetwork.JoinOptionHostname("test"),
+ libnetwork.JoinOptionDomainname("docker.io"),
+ libnetwork.JoinOptionExtraHost("web", "192.168.0.1"))
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ err = ep.Leave(containerID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ err = ep.Leave("")
+ if err == nil {
+ t.Fatal("Expected to fail leave with empty container id")
+ }
+
+ if _, ok := err.(libnetwork.InvalidContainerIDError); !ok {
+ t.Fatalf("Failed for unexpected reason: %v", err)
+ }
+
+ err = ep.Leave("container2")
+ if err == nil {
+ t.Fatal("Expected to fail leave with wrong container id")
+ }
+
+ if _, ok := err.(libnetwork.InvalidContainerIDError); !ok {
+ t.Fatalf("Failed for unexpected reason: %v", err)
+ }
+
+}
+
+func TestEndpointUpdateParent(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ n, err := createTestNetwork("bridge", "testnetwork", options.Generic{}, options.Generic{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ep1, err := n.CreateEndpoint("ep1", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = ep1.Join(containerID,
+ libnetwork.JoinOptionHostname("test1"),
+ libnetwork.JoinOptionDomainname("docker.io"),
+ libnetwork.JoinOptionExtraHost("web", "192.168.0.1"))
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ err = ep1.Leave(containerID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ ep2, err := n.CreateEndpoint("ep2", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = ep2.Join("container2",
+ libnetwork.JoinOptionHostname("test2"),
+ libnetwork.JoinOptionDomainname("docker.io"),
+ libnetwork.JoinOptionHostsPath("/var/lib/docker/test_network/container2/hosts"),
+ libnetwork.JoinOptionParentUpdate(ep1.ID(), "web", "192.168.0.2"))
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ defer func() {
+ err = ep2.Leave("container2")
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+}
+
+func TestEnableIPv6(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\nnameserver 2001:4860:4860::8888")
+	// take a copy of resolv.conf for restoring after the test completes
+ resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
+ if err != nil {
+ t.Fatal(err)
+ }
+	// cleanup
+ defer func() {
+ if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ ip, cidrv6, err := net.ParseCIDR("fe80::1/64")
+ if err != nil {
+ t.Fatal(err)
+ }
+ cidrv6.IP = ip
+
+ netOption := options.Generic{
+ netlabel.EnableIPv6: true,
+ netlabel.GenericData: options.Generic{
+ "FixedCIDRv6": cidrv6,
+ },
+ }
+
+ n, err := createTestNetwork("bridge", "testnetwork", options.Generic{}, netOption)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ep1, err := n.CreateEndpoint("ep1", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ resolvConfPath := "/tmp/libnetwork_test/resolv.conf"
+ defer os.Remove(resolvConfPath)
+
+ _, err = ep1.Join(containerID,
+ libnetwork.JoinOptionResolvConfPath(resolvConfPath))
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ err = ep1.Leave(containerID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ content, err := ioutil.ReadFile(resolvConfPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(content, tmpResolvConf) {
+ t.Fatalf("Expected %s, Got %s", string(tmpResolvConf), string(content))
+ }
+
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestResolvConf(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ defer netutils.SetupTestNetNS(t)()
+ }
+
+ tmpResolvConf1 := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\nnameserver 2001:4860:4860::8888")
+ expectedResolvConf1 := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n")
+ tmpResolvConf2 := []byte("search pommesfrites.fr\nnameserver 112.34.56.78\nnameserver 2001:4860:4860::8888")
+ expectedResolvConf2 := []byte("search pommesfrites.fr\nnameserver 112.34.56.78\n")
+ tmpResolvConf3 := []byte("search pommesfrites.fr\nnameserver 113.34.56.78\n")
+
+	// take a copy of resolv.conf for restoring after the test completes
+ resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
+ if err != nil {
+ t.Fatal(err)
+ }
+	// cleanup
+ defer func() {
+ if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ n, err := createTestNetwork("bridge", "testnetwork", options.Generic{}, options.Generic{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ep1, err := n.CreateEndpoint("ep1", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf1, 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ resolvConfPath := "/tmp/libnetwork_test/resolv.conf"
+ defer os.Remove(resolvConfPath)
+
+ _, err = ep1.Join(containerID,
+ libnetwork.JoinOptionResolvConfPath(resolvConfPath))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ err = ep1.Leave(containerID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ finfo, err := os.Stat(resolvConfPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fmode := (os.FileMode)(0644)
+ if finfo.Mode() != fmode {
+ t.Fatalf("Expected file mode %s, got %s", fmode.String(), finfo.Mode().String())
+ }
+
+ content, err := ioutil.ReadFile(resolvConfPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(content, expectedResolvConf1) {
+ t.Fatalf("Expected %s, Got %s", string(expectedResolvConf1), string(content))
+ }
+
+ err = ep1.Leave(containerID)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf2, 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = ep1.Join(containerID,
+ libnetwork.JoinOptionResolvConfPath(resolvConfPath))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ content, err = ioutil.ReadFile(resolvConfPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(content, expectedResolvConf2) {
+ t.Fatalf("Expected %s, Got %s", string(expectedResolvConf2), string(content))
+ }
+
+ if err := ioutil.WriteFile(resolvConfPath, tmpResolvConf3, 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ err = ep1.Leave(containerID)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = ep1.Join(containerID,
+ libnetwork.JoinOptionResolvConfPath(resolvConfPath))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ content, err = ioutil.ReadFile(resolvConfPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(content, tmpResolvConf3) {
+ t.Fatalf("Expected %s, Got %s", string(tmpResolvConf3), string(content))
+ }
+}
+
+func TestInvalidRemoteDriver(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ t.Skip("Skipping test when not running inside a Container")
+ }
+
+ mux := http.NewServeMux()
+ server := httptest.NewServer(mux)
+ if server == nil {
+ t.Fatal("Failed to start a HTTP Server")
+ }
+ defer server.Close()
+
+ type pluginRequest struct {
+ name string
+ }
+
+ mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+ fmt.Fprintln(w, `{"Implements": ["InvalidDriver"]}`)
+ })
+
+ if err := os.MkdirAll("/usr/share/docker/plugins", 0755); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := os.RemoveAll("/usr/share/docker/plugins"); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ if err := ioutil.WriteFile("/usr/share/docker/plugins/invalid-network-driver.spec", []byte(server.URL), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ controller, err := libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = controller.NewNetwork("invalid-network-driver", "dummy",
+ libnetwork.NetworkOptionGeneric(getEmptyGenericOption()))
+ if err == nil {
+ t.Fatal("Expected to fail. But instead succeeded")
+ }
+
+ if err != plugins.ErrNotImplements {
+ t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+ }
+}
+
+func TestValidRemoteDriver(t *testing.T) {
+ if !netutils.IsRunningInContainer() {
+ t.Skip("Skipping test when not running inside a Container")
+ }
+
+ mux := http.NewServeMux()
+ server := httptest.NewServer(mux)
+ if server == nil {
+ t.Fatal("Failed to start a HTTP Server")
+ }
+ defer server.Close()
+
+ type pluginRequest struct {
+ name string
+ }
+
+ mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+ fmt.Fprintf(w, `{"Implements": ["%s"]}`, driverapi.NetworkPluginEndpointType)
+ })
+ mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+ fmt.Fprintf(w, "null")
+ })
+
+ if err := os.MkdirAll("/usr/share/docker/plugins", 0755); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := os.RemoveAll("/usr/share/docker/plugins"); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ if err := ioutil.WriteFile("/usr/share/docker/plugins/valid-network-driver.spec", []byte(server.URL), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ controller, err := libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = controller.NewNetwork("valid-network-driver", "dummy",
+ libnetwork.NetworkOptionGeneric(getEmptyGenericOption()))
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+var (
+ once sync.Once
+ ctrlr libnetwork.NetworkController
+ start = make(chan struct{})
+ done = make(chan chan struct{}, numThreads-1)
+ origns = netns.None()
+ testns = netns.None()
+)
+
+const (
+ iterCnt = 25
+ numThreads = 3
+ first = 1
+ last = numThreads
+ debug = false
+)
+
+func createGlobalInstance(t *testing.T) {
+ var err error
+ defer close(start)
+
+ origns, err = netns.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if netutils.IsRunningInContainer() {
+ testns = origns
+ } else {
+ testns, err = netns.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ ctrlr, err = libnetwork.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ctrlr.ConfigureNetworkDriver(bridgeNetType, getEmptyGenericOption())
+ if err != nil {
+ t.Fatal("configure driver")
+ }
+
+ net, err := ctrlr.NewNetwork(bridgeNetType, "network1")
+ if err != nil {
+ t.Fatal("new network")
+ }
+
+ _, err = net.CreateEndpoint("ep1")
+ if err != nil {
+ t.Fatal("createendpoint")
+ }
+}
+
+func debugf(format string, a ...interface{}) (int, error) {
+ if debug {
+ return fmt.Printf(format, a...)
+ }
+
+ return 0, nil
+}
+
+func parallelJoin(t *testing.T, ep libnetwork.Endpoint, thrNumber int) {
+ debugf("J%d.", thrNumber)
+ _, err := ep.Join("racing_container")
+ runtime.LockOSThread()
+ if err != nil {
+ if _, ok := err.(libnetwork.ErrNoContainer); !ok {
+ if _, ok := err.(libnetwork.ErrInvalidJoin); !ok {
+ t.Fatal(err)
+ }
+ }
+ debugf("JE%d(%v).", thrNumber, err)
+ }
+ debugf("JD%d.", thrNumber)
+}
+
+func parallelLeave(t *testing.T, ep libnetwork.Endpoint, thrNumber int) {
+ debugf("L%d.", thrNumber)
+ err := ep.Leave("racing_container")
+ runtime.LockOSThread()
+ if err != nil {
+ if _, ok := err.(libnetwork.ErrNoContainer); !ok {
+ if _, ok := err.(libnetwork.ErrInvalidJoin); !ok {
+ t.Fatal(err)
+ }
+ }
+ debugf("LE%d(%v).", thrNumber, err)
+ }
+ debugf("LD%d.", thrNumber)
+}
+
+func runParallelTests(t *testing.T, thrNumber int) {
+ var err error
+
+ t.Parallel()
+
+ pTest := flag.Lookup("test.parallel")
+ if pTest == nil {
+ t.Skip("Skipped because test.parallel flag not set;")
+ }
+ numParallel, err := strconv.Atoi(pTest.Value.String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if numParallel < numThreads {
+ t.Skip("Skipped because t.parallel was less than ", numThreads)
+ }
+
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ if thrNumber == first {
+ createGlobalInstance(t)
+ }
+
+ if thrNumber != first {
+ select {
+ case <-start:
+ }
+
+ thrdone := make(chan struct{})
+ done <- thrdone
+ defer close(thrdone)
+
+ if thrNumber == last {
+ defer close(done)
+ }
+
+ err = netns.Set(testns)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ defer netns.Set(origns)
+
+ net, err := ctrlr.NetworkByName("network1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if net == nil {
+ t.Fatal("Could not find network1")
+ }
+
+ ep, err := net.EndpointByName("ep1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if ep == nil {
+ t.Fatal("Got nil ep with no error")
+ }
+
+ for i := 0; i < iterCnt; i++ {
+ parallelJoin(t, ep, thrNumber)
+ parallelLeave(t, ep, thrNumber)
+ }
+
+ debugf("\n")
+
+ if thrNumber == first {
+ for thrdone := range done {
+ select {
+ case <-thrdone:
+ }
+ }
+
+ testns.Close()
+ err = ep.Delete()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestParallel1(t *testing.T) {
+ runParallelTests(t, 1)
+}
+
+func TestParallel2(t *testing.T) {
+ runParallelTests(t, 2)
+}
+
+func TestParallel3(t *testing.T) {
+ runParallelTests(t, 3)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/netlabel/labels.go b/vendor/src/github.com/docker/libnetwork/netlabel/labels.go
new file mode 100644
index 0000000000..adbabbc475
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/netlabel/labels.go
@@ -0,0 +1,18 @@
+package netlabel
+
+const (
+ // GenericData constant that helps to identify an option as a Generic constant
+ GenericData = "io.docker.network.generic"
+
+ // PortMap constant represents Port Mapping
+ PortMap = "io.docker.network.endpoint.portmap"
+
+ // MacAddress constant represents Mac Address config of a Container
+ MacAddress = "io.docker.network.endpoint.macaddress"
+
+ // ExposedPorts constant represents exposedports of a Container
+ ExposedPorts = "io.docker.network.endpoint.exposedports"
+
+	// EnableIPv6 constant represents enabling IPv6 at network level
+ EnableIPv6 = "io.docker.network.enable_ipv6"
+)
diff --git a/vendor/src/github.com/docker/libnetwork/netutils/test_utils.go b/vendor/src/github.com/docker/libnetwork/netutils/test_utils.go
new file mode 100644
index 0000000000..d0a2fab789
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/netutils/test_utils.go
@@ -0,0 +1,41 @@
+package netutils
+
+import (
+ "flag"
+ "runtime"
+ "syscall"
+ "testing"
+)
+
+var runningInContainer = flag.Bool("incontainer", false, "Indicates if the test is running in a container")
+
+// IsRunningInContainer returns whether the test is running inside a container.
+func IsRunningInContainer() bool {
+ return (*runningInContainer)
+}
+
+// SetupTestNetNS joins a new network namespace, and returns its associated
+// teardown function.
+//
+// Example usage:
+//
+// defer SetupTestNetNS(t)()
+//
+func SetupTestNetNS(t *testing.T) func() {
+ runtime.LockOSThread()
+ if err := syscall.Unshare(syscall.CLONE_NEWNET); err != nil {
+ t.Fatalf("Failed to enter netns: %v", err)
+ }
+
+ fd, err := syscall.Open("/proc/self/ns/net", syscall.O_RDONLY, 0)
+ if err != nil {
+ t.Fatal("Failed to open netns file")
+ }
+
+ return func() {
+ if err := syscall.Close(fd); err != nil {
+ t.Logf("Warning: netns closing failed (%v)", err)
+ }
+ runtime.UnlockOSThread()
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/netutils/utils.go b/vendor/src/github.com/docker/libnetwork/netutils/utils.go
new file mode 100644
index 0000000000..98da12e981
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/netutils/utils.go
@@ -0,0 +1,149 @@
+// Network utility functions.
+
+package netutils
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+
+ "github.com/vishvananda/netlink"
+)
+
+var (
+ // ErrNetworkOverlapsWithNameservers preformatted error
+ ErrNetworkOverlapsWithNameservers = errors.New("requested network overlaps with nameserver")
+ // ErrNetworkOverlaps preformatted error
+ ErrNetworkOverlaps = errors.New("requested network overlaps with existing network")
+ // ErrNoDefaultRoute preformatted error
+ ErrNoDefaultRoute = errors.New("no default route")
+
+ networkGetRoutesFct = netlink.RouteList
+)
+
+// CheckNameserverOverlaps checks whether the passed network overlaps with any of the nameservers
+func CheckNameserverOverlaps(nameservers []string, toCheck *net.IPNet) error {
+ if len(nameservers) > 0 {
+ for _, ns := range nameservers {
+ _, nsNetwork, err := net.ParseCIDR(ns)
+ if err != nil {
+ return err
+ }
+ if NetworkOverlaps(toCheck, nsNetwork) {
+ return ErrNetworkOverlapsWithNameservers
+ }
+ }
+ }
+ return nil
+}
+
+// CheckRouteOverlaps checks whether the passed network overlaps with any existing routes
+func CheckRouteOverlaps(toCheck *net.IPNet) error {
+ networks, err := networkGetRoutesFct(nil, netlink.FAMILY_V4)
+ if err != nil {
+ return err
+ }
+
+ for _, network := range networks {
+ if network.Dst != nil && NetworkOverlaps(toCheck, network.Dst) {
+ return ErrNetworkOverlaps
+ }
+ }
+ return nil
+}
+
+// NetworkOverlaps detects overlap between one IPNet and another
+func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool {
+ // Check if both netX and netY are ipv4 or ipv6
+ if (netX.IP.To4() != nil && netY.IP.To4() != nil) ||
+ (netX.IP.To4() == nil && netY.IP.To4() == nil) {
+ if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) {
+ return true
+ }
+ if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) {
+ return true
+ }
+ }
+ return false
+}
+
+// NetworkRange calculates the first and last IP addresses in an IPNet
+func NetworkRange(network *net.IPNet) (net.IP, net.IP) {
+ var netIP net.IP
+ if network.IP.To4() != nil {
+ netIP = network.IP.To4()
+ } else if network.IP.To16() != nil {
+ netIP = network.IP.To16()
+ } else {
+ return nil, nil
+ }
+
+ lastIP := make([]byte, len(netIP), len(netIP))
+ for i := 0; i < len(netIP); i++ {
+ lastIP[i] = netIP[i] | ^network.Mask[i]
+ }
+ return netIP.Mask(network.Mask), net.IP(lastIP)
+}
+
+// GetIfaceAddr returns the first IPv4 address and slice of IPv6 addresses for the specified network interface
+func GetIfaceAddr(name string) (net.Addr, []net.Addr, error) {
+ iface, err := net.InterfaceByName(name)
+ if err != nil {
+ return nil, nil, err
+ }
+ addrs, err := iface.Addrs()
+ if err != nil {
+ return nil, nil, err
+ }
+ var addrs4 []net.Addr
+ var addrs6 []net.Addr
+ for _, addr := range addrs {
+ ip := (addr.(*net.IPNet)).IP
+ if ip4 := ip.To4(); ip4 != nil {
+ addrs4 = append(addrs4, addr)
+ } else if ip6 := ip.To16(); len(ip6) == net.IPv6len {
+ addrs6 = append(addrs6, addr)
+ }
+ }
+ switch {
+ case len(addrs4) == 0:
+ return nil, nil, fmt.Errorf("Interface %v has no IPv4 addresses", name)
+ case len(addrs4) > 1:
+ fmt.Printf("Interface %v has more than 1 IPv4 address. Defaulting to using %v\n",
+ name, (addrs4[0].(*net.IPNet)).IP)
+ }
+ return addrs4[0], addrs6, nil
+}
+
+// GenerateRandomMAC returns a new 6-byte(48-bit) hardware address (MAC)
+func GenerateRandomMAC() net.HardwareAddr {
+ hw := make(net.HardwareAddr, 6)
+ // The first byte of the MAC address has to comply with these rules:
+ // 1. Unicast: Set the least-significant bit to 0.
+ // 2. Address is locally administered: Set the second-least-significant bit (U/L) to 1.
+ // 3. As "small" as possible: The veth address has to be "smaller" than the bridge address.
+ hw[0] = 0x02
+ // The first 24 bits of the MAC represent the Organizationally Unique Identifier (OUI).
+ // Since this address is locally administered, we can do whatever we want as long as
+ // it doesn't conflict with other addresses.
+ hw[1] = 0x42
+ // Randomly generate the remaining 4 bytes (2^32)
+ _, err := rand.Read(hw[2:])
+ if err != nil {
+ return nil
+ }
+ return hw
+}
+
+// GenerateRandomName returns a new name joined with a prefix. This size
+// specified is used to truncate the randomly generated value
+func GenerateRandomName(prefix string, size int) (string, error) {
+ id := make([]byte, 32)
+ if _, err := io.ReadFull(rand.Reader, id); err != nil {
+ return "", err
+ }
+ return prefix + hex.EncodeToString(id)[:size], nil
+}
diff --git a/daemon/networkdriver/network_test.go b/vendor/src/github.com/docker/libnetwork/netutils/utils_test.go
similarity index 79%
rename from daemon/networkdriver/network_test.go
rename to vendor/src/github.com/docker/libnetwork/netutils/utils_test.go
index 1a6336b5de..78de626e7f 100644
--- a/daemon/networkdriver/network_test.go
+++ b/vendor/src/github.com/docker/libnetwork/netutils/utils_test.go
@@ -1,9 +1,11 @@
-package networkdriver
+package netutils
import (
- "github.com/docker/libcontainer/netlink"
+ "bytes"
"net"
"testing"
+
+ "github.com/vishvananda/netlink"
)
func TestNonOverlapingNameservers(t *testing.T) {
@@ -39,13 +41,13 @@ func TestCheckRouteOverlaps(t *testing.T) {
defer func() {
networkGetRoutesFct = orig
}()
- networkGetRoutesFct = func() ([]netlink.Route, error) {
+ networkGetRoutesFct = func(netlink.Link, int) ([]netlink.Route, error) {
routesData := []string{"10.0.2.0/32", "10.0.3.0/24", "10.0.42.0/24", "172.16.42.0/24", "192.168.142.0/24"}
routes := []netlink.Route{}
for _, addr := range routesData {
_, netX, _ := net.ParseCIDR(addr)
- routes = append(routes, netlink.Route{IPNet: netX})
+ routes = append(routes, netlink.Route{Dst: netX})
}
return routes, nil
}
@@ -173,3 +175,37 @@ func TestNetworkRange(t *testing.T) {
t.Error(last.String())
}
}
+
+// Test veth name generation "veth"+rand (e.g. veth0f60e2c)
+func TestGenerateRandomName(t *testing.T) {
+ name1, err := GenerateRandomName("veth", 7)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // veth plus generated append equals a len of 11
+ if len(name1) != 11 {
+ t.Fatalf("Expected 11 characters, instead received %d characters", len(name1))
+ }
+ name2, err := GenerateRandomName("veth", 7)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Fail if the random generated names equal one another
+ if name1 == name2 {
+ t.Fatalf("Expected differing values but received %s and %s", name1, name2)
+ }
+}
+
+// Test mac generation.
+func TestUtilGenerateRandomMAC(t *testing.T) {
+ mac1 := GenerateRandomMAC()
+ mac2 := GenerateRandomMAC()
+ // ensure bytes are unique
+ if bytes.Equal(mac1, mac2) {
+ t.Fatalf("mac1 %s should not equal mac2 %s", mac1, mac2)
+ }
+ // existing tests check string functionality so keeping the pattern
+ if mac1.String() == mac2.String() {
+ t.Fatalf("mac1 %s should not equal mac2 %s", mac1, mac2)
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/network.go b/vendor/src/github.com/docker/libnetwork/network.go
new file mode 100644
index 0000000000..36938a5458
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/network.go
@@ -0,0 +1,207 @@
+package libnetwork
+
+import (
+ "sync"
+
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/netlabel"
+ "github.com/docker/libnetwork/options"
+ "github.com/docker/libnetwork/types"
+)
+
+// A Network represents a logical connectivity zone that containers may
+// join using the Link method. A Network is managed by a specific driver.
+type Network interface {
+ // A user chosen name for this network.
+ Name() string
+
+ // A system generated id for this network.
+ ID() string
+
+ // The type of network, which corresponds to its managing driver.
+ Type() string
+
+ // Create a new endpoint to this network symbolically identified by the
+ // specified unique name. The options parameter carry driver specific options.
+ // Labels support will be added in the near future.
+ CreateEndpoint(name string, options ...EndpointOption) (Endpoint, error)
+
+ // Delete the network.
+ Delete() error
+
+ // Endpoints returns the list of Endpoint(s) in this network.
+ Endpoints() []Endpoint
+
+ // WalkEndpoints uses the provided function to walk the Endpoints
+ WalkEndpoints(walker EndpointWalker)
+
+ // EndpointByName returns the Endpoint which has the passed name. If not found, the error ErrNoSuchEndpoint is returned.
+ EndpointByName(name string) (Endpoint, error)
+
+ // EndpointByID returns the Endpoint which has the passed id. If not found, the error ErrNoSuchEndpoint is returned.
+ EndpointByID(id string) (Endpoint, error)
+}
+
+// EndpointWalker is a client provided function which will be used to walk the Endpoints.
+// When the function returns true, the walk will stop.
+type EndpointWalker func(ep Endpoint) bool
+
+type network struct {
+ ctrlr *controller
+ name string
+ networkType string
+ id types.UUID
+ driver driverapi.Driver
+ enableIPv6 bool
+ endpoints endpointTable
+ generic options.Generic
+ sync.Mutex
+}
+
+func (n *network) Name() string {
+ return n.name
+}
+
+func (n *network) ID() string {
+ return string(n.id)
+}
+
+func (n *network) Type() string {
+ if n.driver == nil {
+ return ""
+ }
+
+ return n.driver.Type()
+}
+
+// NetworkOption is an option setter function type used to pass various options to
+// NewNetwork method. The various setter functions of type NetworkOption are
+// provided by libnetwork, they look like NetworkOptionXXXX(...)
+type NetworkOption func(n *network)
+
+// NetworkOptionGeneric function returns an option setter for a Generic option defined
+// in a Dictionary of Key-Value pair
+func NetworkOptionGeneric(generic map[string]interface{}) NetworkOption {
+ return func(n *network) {
+ n.generic = generic
+ if _, ok := generic[netlabel.EnableIPv6]; ok {
+ n.enableIPv6 = generic[netlabel.EnableIPv6].(bool)
+ }
+ }
+}
+
+func (n *network) processOptions(options ...NetworkOption) {
+ for _, opt := range options {
+ if opt != nil {
+ opt(n)
+ }
+ }
+}
+
+func (n *network) Delete() error {
+ var err error
+
+ n.ctrlr.Lock()
+ _, ok := n.ctrlr.networks[n.id]
+ if !ok {
+ n.ctrlr.Unlock()
+ return &UnknownNetworkError{name: n.name, id: string(n.id)}
+ }
+
+ n.Lock()
+ numEps := len(n.endpoints)
+ n.Unlock()
+ if numEps != 0 {
+ n.ctrlr.Unlock()
+ return &ActiveEndpointsError{name: n.name, id: string(n.id)}
+ }
+
+ delete(n.ctrlr.networks, n.id)
+ n.ctrlr.Unlock()
+ defer func() {
+ if err != nil {
+ n.ctrlr.Lock()
+ n.ctrlr.networks[n.id] = n
+ n.ctrlr.Unlock()
+ }
+ }()
+
+ err = n.driver.DeleteNetwork(n.id)
+ return err
+}
+
+func (n *network) CreateEndpoint(name string, options ...EndpointOption) (Endpoint, error) {
+ if name == "" {
+ return nil, ErrInvalidName(name)
+ }
+ ep := &endpoint{name: name, iFaces: []*endpointInterface{}, generic: make(map[string]interface{})}
+ ep.id = types.UUID(stringid.GenerateRandomID())
+ ep.network = n
+ ep.processOptions(options...)
+
+ d := n.driver
+ err := d.CreateEndpoint(n.id, ep.id, ep, ep.generic)
+ if err != nil {
+ return nil, err
+ }
+
+ n.Lock()
+ n.endpoints[ep.id] = ep
+ n.Unlock()
+ return ep, nil
+}
+
+func (n *network) Endpoints() []Endpoint {
+ n.Lock()
+ defer n.Unlock()
+ list := make([]Endpoint, 0, len(n.endpoints))
+ for _, e := range n.endpoints {
+ list = append(list, e)
+ }
+
+ return list
+}
+
+func (n *network) WalkEndpoints(walker EndpointWalker) {
+ for _, e := range n.Endpoints() {
+ if walker(e) {
+ return
+ }
+ }
+}
+
+func (n *network) EndpointByName(name string) (Endpoint, error) {
+ if name == "" {
+ return nil, ErrInvalidName(name)
+ }
+ var e Endpoint
+
+ s := func(current Endpoint) bool {
+ if current.Name() == name {
+ e = current
+ return true
+ }
+ return false
+ }
+
+ n.WalkEndpoints(s)
+
+ if e == nil {
+ return nil, ErrNoSuchEndpoint(name)
+ }
+
+ return e, nil
+}
+
+func (n *network) EndpointByID(id string) (Endpoint, error) {
+ if id == "" {
+ return nil, ErrInvalidID(id)
+ }
+ n.Lock()
+ defer n.Unlock()
+ if e, ok := n.endpoints[types.UUID(id)]; ok {
+ return e, nil
+ }
+ return nil, ErrNoSuchEndpoint(id)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/options/options.go b/vendor/src/github.com/docker/libnetwork/options/options.go
new file mode 100644
index 0000000000..e0e93ff9b7
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/options/options.go
@@ -0,0 +1,73 @@
+// Package options provides a way to pass unstructured sets of options to a
+// component expecting a strongly-typed configuration structure.
+package options
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// NoSuchFieldError is the error returned when the generic parameters hold a
+// value for a field absent from the destination structure.
+type NoSuchFieldError struct {
+ Field string
+ Type string
+}
+
+func (e NoSuchFieldError) Error() string {
+ return fmt.Sprintf("no field %q in type %q", e.Field, e.Type)
+}
+
+// CannotSetFieldError is the error returned when the generic parameters hold a
+// value for a field that cannot be set in the destination structure.
+type CannotSetFieldError struct {
+ Field string
+ Type string
+}
+
+func (e CannotSetFieldError) Error() string {
+ return fmt.Sprintf("cannot set field %q of type %q", e.Field, e.Type)
+}
+
+// Generic is a basic type to store arbitrary settings.
+type Generic map[string]interface{}
+
+// NewGeneric returns a new Generic instance.
+func NewGeneric() Generic {
+ return make(Generic)
+}
+
+// GenerateFromModel takes the generic options, and tries to build a new
+// instance of the model's type by matching keys from the generic options to
+// fields in the model.
+//
+// The return value is of the same type as the model (including a potential
+// pointer qualifier).
+func GenerateFromModel(options Generic, model interface{}) (interface{}, error) {
+ modType := reflect.TypeOf(model)
+
+ // If the model is of pointer type, we need to dereference for New.
+ resType := reflect.TypeOf(model)
+ if modType.Kind() == reflect.Ptr {
+ resType = resType.Elem()
+ }
+
+ // Populate the result structure with the generic layout content.
+ res := reflect.New(resType)
+ for name, value := range options {
+ field := res.Elem().FieldByName(name)
+ if !field.IsValid() {
+ return nil, NoSuchFieldError{name, resType.String()}
+ }
+ if !field.CanSet() {
+ return nil, CannotSetFieldError{name, resType.String()}
+ }
+ field.Set(reflect.ValueOf(value))
+ }
+
+ // If the model is not of pointer type, return content of the result.
+ if modType.Kind() == reflect.Ptr {
+ return res.Interface(), nil
+ }
+ return res.Elem().Interface(), nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/options/options_test.go b/vendor/src/github.com/docker/libnetwork/options/options_test.go
new file mode 100644
index 0000000000..ecd3b3b311
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/options/options_test.go
@@ -0,0 +1,97 @@
+package options
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+
+ _ "github.com/docker/libnetwork/netutils"
+)
+
+func TestGenerate(t *testing.T) {
+ gen := NewGeneric()
+ gen["Int"] = 1
+ gen["Rune"] = 'b'
+ gen["Float64"] = 2.0
+
+ type Model struct {
+ Int int
+ Rune rune
+ Float64 float64
+ }
+
+ result, err := GenerateFromModel(gen, Model{})
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cast, ok := result.(Model)
+ if !ok {
+ t.Fatalf("result has unexpected type %s", reflect.TypeOf(result))
+ }
+ if expected := 1; cast.Int != expected {
+ t.Fatalf("wrong value for field Int: expected %v, got %v", expected, cast.Int)
+ }
+ if expected := 'b'; cast.Rune != expected {
+ t.Fatalf("wrong value for field Rune: expected %v, got %v", expected, cast.Rune)
+ }
+ if expected := 2.0; cast.Float64 != expected {
+ t.Fatalf("wrong value for field Int: expected %v, got %v", expected, cast.Float64)
+ }
+}
+
+func TestGeneratePtr(t *testing.T) {
+ gen := NewGeneric()
+ gen["Int"] = 1
+ gen["Rune"] = 'b'
+ gen["Float64"] = 2.0
+
+ type Model struct {
+ Int int
+ Rune rune
+ Float64 float64
+ }
+
+ result, err := GenerateFromModel(gen, &Model{})
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cast, ok := result.(*Model)
+ if !ok {
+ t.Fatalf("result has unexpected type %s", reflect.TypeOf(result))
+ }
+ if expected := 1; cast.Int != expected {
+ t.Fatalf("wrong value for field Int: expected %v, got %v", expected, cast.Int)
+ }
+ if expected := 'b'; cast.Rune != expected {
+ t.Fatalf("wrong value for field Rune: expected %v, got %v", expected, cast.Rune)
+ }
+ if expected := 2.0; cast.Float64 != expected {
+ t.Fatalf("wrong value for field Int: expected %v, got %v", expected, cast.Float64)
+ }
+}
+
+func TestGenerateMissingField(t *testing.T) {
+ type Model struct{}
+ _, err := GenerateFromModel(Generic{"foo": "bar"}, Model{})
+
+ if _, ok := err.(NoSuchFieldError); !ok {
+ t.Fatalf("expected NoSuchFieldError, got %#v", err)
+ } else if expected := "no field"; !strings.Contains(err.Error(), expected) {
+ t.Fatalf("expected %q in error message, got %s", expected, err.Error())
+ }
+}
+
+func TestFieldCannotBeSet(t *testing.T) {
+ type Model struct{ foo int }
+ _, err := GenerateFromModel(Generic{"foo": "bar"}, Model{})
+
+ if _, ok := err.(CannotSetFieldError); !ok {
+ t.Fatalf("expected CannotSetFieldError, got %#v", err)
+ } else if expected := "cannot set field"; !strings.Contains(err.Error(), expected) {
+ t.Fatalf("expected %q in error message, got %s", expected, err.Error())
+ }
+}
diff --git a/daemon/networkdriver/portallocator/portallocator.go b/vendor/src/github.com/docker/libnetwork/portallocator/portallocator.go
similarity index 69%
rename from daemon/networkdriver/portallocator/portallocator.go
rename to vendor/src/github.com/docker/libnetwork/portallocator/portallocator.go
index c1f414b673..84b07b2523 100644
--- a/daemon/networkdriver/portallocator/portallocator.go
+++ b/vendor/src/github.com/docker/libnetwork/portallocator/portallocator.go
@@ -12,47 +12,60 @@ import (
)
const (
+ // DefaultPortRangeStart indicates the first port in port range
DefaultPortRangeStart = 49153
- DefaultPortRangeEnd = 65535
+ // DefaultPortRangeEnd indicates the last port in port range
+ DefaultPortRangeEnd = 65535
)
type ipMapping map[string]protoMap
var (
+ // ErrAllPortsAllocated is returned when no more ports are available
ErrAllPortsAllocated = errors.New("all ports are allocated")
- ErrUnknownProtocol = errors.New("unknown protocol")
- defaultIP = net.ParseIP("0.0.0.0")
+ // ErrUnknownProtocol is returned when an unknown protocol was specified
+ ErrUnknownProtocol = errors.New("unknown protocol")
+ defaultIP = net.ParseIP("0.0.0.0")
+ once sync.Once
+ instance *PortAllocator
+ createInstance = func() { instance = newInstance() }
)
+// ErrPortAlreadyAllocated is the error returned when a requested port is already in use
type ErrPortAlreadyAllocated struct {
ip string
port int
}
-func NewErrPortAlreadyAllocated(ip string, port int) ErrPortAlreadyAllocated {
+func newErrPortAlreadyAllocated(ip string, port int) ErrPortAlreadyAllocated {
return ErrPortAlreadyAllocated{
ip: ip,
port: port,
}
}
+// IP returns the address to which the used port is associated
func (e ErrPortAlreadyAllocated) IP() string {
return e.ip
}
+// Port returns the value of the already used port
func (e ErrPortAlreadyAllocated) Port() int {
return e.port
}
+// IPPort returns the address and the port in the form ip:port
func (e ErrPortAlreadyAllocated) IPPort() string {
return fmt.Sprintf("%s:%d", e.ip, e.port)
}
+// Error is the implementation of error.Error interface
func (e ErrPortAlreadyAllocated) Error() string {
return fmt.Sprintf("Bind for %s:%d failed: port is already allocated", e.ip, e.port)
}
type (
+ // PortAllocator manages the transport ports database
PortAllocator struct {
mutex sync.Mutex
ipMap ipMapping
@@ -67,7 +80,18 @@ type (
protoMap map[string]*portMap
)
-func New() *PortAllocator {
+// Get returns the default instance of PortAllocator
+func Get() *PortAllocator {
+ // Port Allocator is a singleton
+ // Note: Long term solution will be each PortAllocator will have access to
+ // the OS so that it can have up to date view of the OS port allocation.
+ // When this happens singleton behavior will be removed. Clients do not
+ // need to worry about this, they will not see a change in behavior.
+ once.Do(createInstance)
+ return instance
+}
+
+func newInstance() *PortAllocator {
start, end, err := getDynamicPortRange()
if err != nil {
logrus.Warn(err)
@@ -98,7 +122,7 @@ func getDynamicPortRange() (start int, end int, err error) {
}
// RequestPort requests new port from global ports pool for specified ip and proto.
-// If port is 0 it returns first free port. Otherwise it cheks port availability
+// If port is 0 it returns first free port. Otherwise it checks port availability
// in pool and return that port or error if port is already busy.
func (p *PortAllocator) RequestPort(ip net.IP, proto string, port int) (int, error) {
p.mutex.Lock()
@@ -127,7 +151,7 @@ func (p *PortAllocator) RequestPort(ip net.IP, proto string, port int) (int, err
mapping.p[port] = struct{}{}
return port, nil
}
- return 0, NewErrPortAlreadyAllocated(ipstr, port)
+ return 0, newErrPortAlreadyAllocated(ipstr, port)
}
port, err := mapping.findPort()
diff --git a/daemon/networkdriver/portallocator/portallocator_test.go b/vendor/src/github.com/docker/libnetwork/portallocator/portallocator_test.go
similarity index 91%
rename from daemon/networkdriver/portallocator/portallocator_test.go
rename to vendor/src/github.com/docker/libnetwork/portallocator/portallocator_test.go
index 17201235e0..20756494af 100644
--- a/daemon/networkdriver/portallocator/portallocator_test.go
+++ b/vendor/src/github.com/docker/libnetwork/portallocator/portallocator_test.go
@@ -3,10 +3,17 @@ package portallocator
import (
"net"
"testing"
+
+ _ "github.com/docker/libnetwork/netutils"
)
+func resetPortAllocator() {
+ instance = newInstance()
+}
+
func TestRequestNewPort(t *testing.T) {
- p := New()
+ p := Get()
+ defer resetPortAllocator()
port, err := p.RequestPort(defaultIP, "tcp", 0)
if err != nil {
@@ -19,19 +26,21 @@ func TestRequestNewPort(t *testing.T) {
}
func TestRequestSpecificPort(t *testing.T) {
- p := New()
+ p := Get()
+ defer resetPortAllocator()
port, err := p.RequestPort(defaultIP, "tcp", 5000)
if err != nil {
t.Fatal(err)
}
+
if port != 5000 {
t.Fatalf("Expected port 5000 got %d", port)
}
}
func TestReleasePort(t *testing.T) {
- p := New()
+ p := Get()
port, err := p.RequestPort(defaultIP, "tcp", 5000)
if err != nil {
@@ -47,7 +56,8 @@ func TestReleasePort(t *testing.T) {
}
func TestReuseReleasedPort(t *testing.T) {
- p := New()
+ p := Get()
+ defer resetPortAllocator()
port, err := p.RequestPort(defaultIP, "tcp", 5000)
if err != nil {
@@ -68,7 +78,8 @@ func TestReuseReleasedPort(t *testing.T) {
}
func TestReleaseUnreadledPort(t *testing.T) {
- p := New()
+ p := Get()
+ defer resetPortAllocator()
port, err := p.RequestPort(defaultIP, "tcp", 5000)
if err != nil {
@@ -88,13 +99,14 @@ func TestReleaseUnreadledPort(t *testing.T) {
}
func TestUnknowProtocol(t *testing.T) {
- if _, err := New().RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol {
+ if _, err := Get().RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol {
t.Fatalf("Expected error %s got %s", ErrUnknownProtocol, err)
}
}
func TestAllocateAllPorts(t *testing.T) {
- p := New()
+ p := Get()
+ defer resetPortAllocator()
for i := 0; i <= p.End-p.Begin; i++ {
port, err := p.RequestPort(defaultIP, "tcp", 0)
@@ -144,7 +156,8 @@ func TestAllocateAllPorts(t *testing.T) {
}
func BenchmarkAllocatePorts(b *testing.B) {
- p := New()
+ p := Get()
+ defer resetPortAllocator()
for i := 0; i < b.N; i++ {
for i := 0; i <= p.End-p.Begin; i++ {
@@ -162,7 +175,8 @@ func BenchmarkAllocatePorts(b *testing.B) {
}
func TestPortAllocation(t *testing.T) {
- p := New()
+ p := Get()
+ defer resetPortAllocator()
ip := net.ParseIP("192.168.0.1")
ip2 := net.ParseIP("192.168.0.2")
@@ -223,7 +237,8 @@ func TestPortAllocation(t *testing.T) {
}
func TestNoDuplicateBPR(t *testing.T) {
- p := New()
+ p := Get()
+ defer resetPortAllocator()
if port, err := p.RequestPort(defaultIP, "tcp", p.Begin); err != nil {
t.Fatal(err)
diff --git a/daemon/networkdriver/portmapper/mapper.go b/vendor/src/github.com/docker/libnetwork/portmapper/mapper.go
similarity index 71%
rename from daemon/networkdriver/portmapper/mapper.go
rename to vendor/src/github.com/docker/libnetwork/portmapper/mapper.go
index 09952ba35b..ac32f66ef1 100644
--- a/daemon/networkdriver/portmapper/mapper.go
+++ b/vendor/src/github.com/docker/libnetwork/portmapper/mapper.go
@@ -7,25 +7,29 @@ import (
"sync"
"github.com/Sirupsen/logrus"
- "github.com/docker/docker/daemon/networkdriver/portallocator"
- "github.com/docker/docker/pkg/iptables"
+ "github.com/docker/libnetwork/iptables"
+ "github.com/docker/libnetwork/portallocator"
)
type mapping struct {
proto string
- userlandProxy UserlandProxy
+ userlandProxy userlandProxy
host net.Addr
container net.Addr
}
-var NewProxy = NewProxyCommand
+var newProxy = newProxyCommand
var (
+ // ErrUnknownBackendAddressType refers to an unknown container or unsupported address type
ErrUnknownBackendAddressType = errors.New("unknown container address type not supported")
- ErrPortMappedForIP = errors.New("port is already mapped to ip")
- ErrPortNotMapped = errors.New("port is not mapped")
+ // ErrPortMappedForIP refers to a port already mapped to an ip address
+ ErrPortMappedForIP = errors.New("port is already mapped to ip")
+ // ErrPortNotMapped refers to an unmapped port
+ ErrPortNotMapped = errors.New("port is not mapped")
)
+// PortMapper manages the network address translation
type PortMapper struct {
chain *iptables.Chain
@@ -36,10 +40,12 @@ type PortMapper struct {
Allocator *portallocator.PortAllocator
}
+// New returns a new instance of PortMapper
func New() *PortMapper {
- return NewWithPortAllocator(portallocator.New())
+ return NewWithPortAllocator(portallocator.Get())
}
+// NewWithPortAllocator returns a new instance of PortMapper which will use the specified PortAllocator
func NewWithPortAllocator(allocator *portallocator.PortAllocator) *PortMapper {
return &PortMapper{
currentMappings: make(map[string]*mapping),
@@ -47,11 +53,13 @@ func NewWithPortAllocator(allocator *portallocator.PortAllocator) *PortMapper {
}
}
+// SetIptablesChain sets the specified chain into portmapper
func (pm *PortMapper) SetIptablesChain(c *iptables.Chain) {
pm.chain = c
}
-func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int) (host net.Addr, err error) {
+// Map maps the specified container transport address to the host's network address and transport port
+func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int, useProxy bool) (host net.Addr, err error) {
pm.lock.Lock()
defer pm.lock.Unlock()
@@ -59,7 +67,6 @@ func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int) (host
m *mapping
proto string
allocatedHostPort int
- proxy UserlandProxy
)
switch container.(type) {
@@ -75,7 +82,11 @@ func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int) (host
container: container,
}
- proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.TCPAddr).IP, container.(*net.TCPAddr).Port)
+ if useProxy {
+ m.userlandProxy = newProxy(proto, hostIP, allocatedHostPort, container.(*net.TCPAddr).IP, container.(*net.TCPAddr).Port)
+ } else {
+ m.userlandProxy = newDummyProxy(proto, hostIP, allocatedHostPort)
+ }
case *net.UDPAddr:
proto = "udp"
if allocatedHostPort, err = pm.Allocator.RequestPort(hostIP, proto, hostPort); err != nil {
@@ -88,7 +99,11 @@ func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int) (host
container: container,
}
- proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.UDPAddr).IP, container.(*net.UDPAddr).Port)
+ if useProxy {
+ m.userlandProxy = newProxy(proto, hostIP, allocatedHostPort, container.(*net.UDPAddr).IP, container.(*net.UDPAddr).Port)
+ } else {
+ m.userlandProxy = newDummyProxy(proto, hostIP, allocatedHostPort)
+ }
default:
return nil, ErrUnknownBackendAddressType
}
@@ -112,7 +127,7 @@ func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int) (host
cleanup := func() error {
// need to undo the iptables rules before we return
- proxy.Stop()
+ m.userlandProxy.Stop()
pm.forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort)
if err := pm.Allocator.ReleasePort(hostIP, m.proto, allocatedHostPort); err != nil {
return err
@@ -121,29 +136,18 @@ func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int) (host
return nil
}
- if err := proxy.Start(); err != nil {
+ if err := m.userlandProxy.Start(); err != nil {
if err := cleanup(); err != nil {
return nil, fmt.Errorf("Error during port allocation cleanup: %v", err)
}
return nil, err
}
- m.userlandProxy = proxy
+
pm.currentMappings[key] = m
return m.host, nil
}
-// re-apply all port mappings
-func (pm *PortMapper) ReMapAll() {
- logrus.Debugln("Re-applying all port mappings.")
- for _, data := range pm.currentMappings {
- containerIP, containerPort := getIPAndPort(data.container)
- hostIP, hostPort := getIPAndPort(data.host)
- if err := pm.forward(iptables.Append, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
- logrus.Errorf("Error on iptables add: %s", err)
- }
- }
-}
-
+// Unmap removes stored mapping for the specified host transport address
func (pm *PortMapper) Unmap(host net.Addr) error {
pm.lock.Lock()
defer pm.lock.Unlock()
@@ -154,7 +158,9 @@ func (pm *PortMapper) Unmap(host net.Addr) error {
return ErrPortNotMapped
}
- data.userlandProxy.Stop()
+ if data.userlandProxy != nil {
+ data.userlandProxy.Stop()
+ }
delete(pm.currentMappings, key)
diff --git a/vendor/src/github.com/docker/libnetwork/portmapper/mapper_test.go b/vendor/src/github.com/docker/libnetwork/portmapper/mapper_test.go
new file mode 100644
index 0000000000..635723de8c
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/portmapper/mapper_test.go
@@ -0,0 +1,271 @@
+package portmapper
+
+import (
+ "net"
+ "strings"
+ "testing"
+
+ "github.com/docker/libnetwork/iptables"
+ _ "github.com/docker/libnetwork/netutils"
+)
+
+func init() {
+ // override this func to mock out the proxy server
+ newProxy = newMockProxyCommand
+}
+
+func TestSetIptablesChain(t *testing.T) {
+ pm := New()
+
+ c := &iptables.Chain{
+ Name: "TEST",
+ Bridge: "192.168.1.1",
+ }
+
+ if pm.chain != nil {
+ t.Fatal("chain should be nil at init")
+ }
+
+ pm.SetIptablesChain(c)
+ if pm.chain == nil {
+ t.Fatal("chain should not be nil after set")
+ }
+}
+
+func TestMapTCPPorts(t *testing.T) {
+ pm := New()
+ dstIP1 := net.ParseIP("192.168.0.1")
+ dstIP2 := net.ParseIP("192.168.0.2")
+ dstAddr1 := &net.TCPAddr{IP: dstIP1, Port: 80}
+ dstAddr2 := &net.TCPAddr{IP: dstIP2, Port: 80}
+
+ srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
+ srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")}
+
+ addrEqual := func(addr1, addr2 net.Addr) bool {
+ return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())
+ }
+
+ if host, err := pm.Map(srcAddr1, dstIP1, 80, true); err != nil {
+ t.Fatalf("Failed to allocate port: %s", err)
+ } else if !addrEqual(dstAddr1, host) {
+ t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
+ dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())
+ }
+
+ if _, err := pm.Map(srcAddr1, dstIP1, 80, true); err == nil {
+ t.Fatalf("Port is in use - mapping should have failed")
+ }
+
+ if _, err := pm.Map(srcAddr2, dstIP1, 80, true); err == nil {
+ t.Fatalf("Port is in use - mapping should have failed")
+ }
+
+ if _, err := pm.Map(srcAddr2, dstIP2, 80, true); err != nil {
+ t.Fatalf("Failed to allocate port: %s", err)
+ }
+
+ if pm.Unmap(dstAddr1) != nil {
+ t.Fatalf("Failed to release port")
+ }
+
+ if pm.Unmap(dstAddr2) != nil {
+ t.Fatalf("Failed to release port")
+ }
+
+ if pm.Unmap(dstAddr2) == nil {
+ t.Fatalf("Port already released, but no error reported")
+ }
+}
+
+func TestGetUDPKey(t *testing.T) {
+ addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53}
+
+ key := getKey(addr)
+
+ if expected := "192.168.1.5:53/udp"; key != expected {
+ t.Fatalf("expected key %s got %s", expected, key)
+ }
+}
+
+func TestGetTCPKey(t *testing.T) {
+ addr := &net.TCPAddr{IP: net.ParseIP("192.168.1.5"), Port: 80}
+
+ key := getKey(addr)
+
+ if expected := "192.168.1.5:80/tcp"; key != expected {
+ t.Fatalf("expected key %s got %s", expected, key)
+ }
+}
+
+func TestGetUDPIPAndPort(t *testing.T) {
+ addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53}
+
+ ip, port := getIPAndPort(addr)
+ if expected := "192.168.1.5"; ip.String() != expected {
+ t.Fatalf("expected ip %s got %s", expected, ip)
+ }
+
+ if ep := 53; port != ep {
+ t.Fatalf("expected port %d got %d", ep, port)
+ }
+}
+
+func TestMapUDPPorts(t *testing.T) {
+ pm := New()
+ dstIP1 := net.ParseIP("192.168.0.1")
+ dstIP2 := net.ParseIP("192.168.0.2")
+ dstAddr1 := &net.UDPAddr{IP: dstIP1, Port: 80}
+ dstAddr2 := &net.UDPAddr{IP: dstIP2, Port: 80}
+
+ srcAddr1 := &net.UDPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
+ srcAddr2 := &net.UDPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")}
+
+ addrEqual := func(addr1, addr2 net.Addr) bool {
+ return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())
+ }
+
+ if host, err := pm.Map(srcAddr1, dstIP1, 80, true); err != nil {
+ t.Fatalf("Failed to allocate port: %s", err)
+ } else if !addrEqual(dstAddr1, host) {
+ t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
+ dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())
+ }
+
+ if _, err := pm.Map(srcAddr1, dstIP1, 80, true); err == nil {
+ t.Fatalf("Port is in use - mapping should have failed")
+ }
+
+ if _, err := pm.Map(srcAddr2, dstIP1, 80, true); err == nil {
+ t.Fatalf("Port is in use - mapping should have failed")
+ }
+
+ if _, err := pm.Map(srcAddr2, dstIP2, 80, true); err != nil {
+ t.Fatalf("Failed to allocate port: %s", err)
+ }
+
+ if pm.Unmap(dstAddr1) != nil {
+ t.Fatalf("Failed to release port")
+ }
+
+ if pm.Unmap(dstAddr2) != nil {
+ t.Fatalf("Failed to release port")
+ }
+
+ if pm.Unmap(dstAddr2) == nil {
+ t.Fatalf("Port already released, but no error reported")
+ }
+}
+
+func TestMapAllPortsSingleInterface(t *testing.T) {
+ pm := New()
+ dstIP1 := net.ParseIP("0.0.0.0")
+ srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
+
+ hosts := []net.Addr{}
+ var host net.Addr
+ var err error
+
+ defer func() {
+ for _, val := range hosts {
+ pm.Unmap(val)
+ }
+ }()
+
+ for i := 0; i < 10; i++ {
+ start, end := pm.Allocator.Begin, pm.Allocator.End
+ for i := start; i < end; i++ {
+ if host, err = pm.Map(srcAddr1, dstIP1, 0, true); err != nil {
+ t.Fatal(err)
+ }
+
+ hosts = append(hosts, host)
+ }
+
+ if _, err := pm.Map(srcAddr1, dstIP1, start, true); err == nil {
+ t.Fatalf("Port %d should be bound but is not", start)
+ }
+
+ for _, val := range hosts {
+ if err := pm.Unmap(val); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ hosts = []net.Addr{}
+ }
+}
+
+func TestMapTCPDummyListen(t *testing.T) {
+ pm := New()
+ dstIP := net.ParseIP("0.0.0.0")
+ dstAddr := &net.TCPAddr{IP: dstIP, Port: 80}
+
+ // no-op for dummy
+ srcAddr := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
+
+ addrEqual := func(addr1, addr2 net.Addr) bool {
+ return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())
+ }
+
+ if host, err := pm.Map(srcAddr, dstIP, 80, false); err != nil {
+ t.Fatalf("Failed to allocate port: %s", err)
+ } else if !addrEqual(dstAddr, host) {
+ t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
+ dstAddr.String(), dstAddr.Network(), host.String(), host.Network())
+ }
+ if _, err := net.Listen("tcp", "0.0.0.0:80"); err == nil {
+ t.Fatal("Listen on mapped port without proxy should fail")
+ } else {
+ if !strings.Contains(err.Error(), "address already in use") {
+ t.Fatalf("Error should be about address already in use, got %v", err)
+ }
+ }
+ if _, err := net.Listen("tcp", "0.0.0.0:81"); err != nil {
+ t.Fatal(err)
+ }
+ if host, err := pm.Map(srcAddr, dstIP, 81, false); err == nil {
+ t.Fatalf("Bound port shouldn't be allocated, but it was on: %v", host)
+ } else {
+ if !strings.Contains(err.Error(), "address already in use") {
+ t.Fatalf("Error should be about address already in use, got %v", err)
+ }
+ }
+}
+
+func TestMapUDPDummyListen(t *testing.T) {
+ pm := New()
+ dstIP := net.ParseIP("0.0.0.0")
+ dstAddr := &net.UDPAddr{IP: dstIP, Port: 80}
+
+ // no-op for dummy
+ srcAddr := &net.UDPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
+
+ addrEqual := func(addr1, addr2 net.Addr) bool {
+ return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())
+ }
+
+ if host, err := pm.Map(srcAddr, dstIP, 80, false); err != nil {
+ t.Fatalf("Failed to allocate port: %s", err)
+ } else if !addrEqual(dstAddr, host) {
+ t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
+ dstAddr.String(), dstAddr.Network(), host.String(), host.Network())
+ }
+ if _, err := net.ListenUDP("udp", &net.UDPAddr{IP: dstIP, Port: 80}); err == nil {
+ t.Fatal("Listen on mapped port without proxy should fail")
+ } else {
+ if !strings.Contains(err.Error(), "address already in use") {
+ t.Fatalf("Error should be about address already in use, got %v", err)
+ }
+ }
+ if _, err := net.ListenUDP("udp", &net.UDPAddr{IP: dstIP, Port: 81}); err != nil {
+ t.Fatal(err)
+ }
+ if host, err := pm.Map(srcAddr, dstIP, 81, false); err == nil {
+ t.Fatalf("Bound port shouldn't be allocated, but it was on: %v", host)
+ } else {
+ if !strings.Contains(err.Error(), "address already in use") {
+ t.Fatalf("Error should be about address already in use, got %v", err)
+ }
+ }
+}
diff --git a/daemon/networkdriver/portmapper/mock_proxy.go b/vendor/src/github.com/docker/libnetwork/portmapper/mock_proxy.go
similarity index 63%
rename from daemon/networkdriver/portmapper/mock_proxy.go
rename to vendor/src/github.com/docker/libnetwork/portmapper/mock_proxy.go
index 253ce83112..29b1605889 100644
--- a/daemon/networkdriver/portmapper/mock_proxy.go
+++ b/vendor/src/github.com/docker/libnetwork/portmapper/mock_proxy.go
@@ -2,7 +2,7 @@ package portmapper
import "net"
-func NewMockProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) UserlandProxy {
+func newMockProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) userlandProxy {
return &mockProxyCommand{}
}
diff --git a/daemon/networkdriver/portmapper/proxy.go b/vendor/src/github.com/docker/libnetwork/portmapper/proxy.go
similarity index 74%
rename from daemon/networkdriver/portmapper/proxy.go
rename to vendor/src/github.com/docker/libnetwork/portmapper/proxy.go
index 80b0027c70..530703b259 100644
--- a/daemon/networkdriver/portmapper/proxy.go
+++ b/vendor/src/github.com/docker/libnetwork/portmapper/proxy.go
@@ -3,6 +3,7 @@ package portmapper
import (
"flag"
"fmt"
+ "io"
"io/ioutil"
"log"
"net"
@@ -23,7 +24,7 @@ func init() {
reexec.Register(userlandProxyCommandName, execProxy)
}
-type UserlandProxy interface {
+type userlandProxy interface {
Start() error
Stop() error
}
@@ -84,14 +85,14 @@ func handleStopSignals(p proxy.Proxy) {
s := make(chan os.Signal, 10)
signal.Notify(s, os.Interrupt, syscall.SIGTERM, syscall.SIGSTOP)
- for range s {
+ for _ = range s {
p.Close()
os.Exit(0)
}
}
-func NewProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) UserlandProxy {
+func newProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) userlandProxy {
args := []string{
userlandProxyCommandName,
"-proto", proto,
@@ -159,3 +160,50 @@ func (p *proxyCommand) Stop() error {
}
return nil
}
+
+// dummyProxy just listens on some port. It is needed to prevent accidental
+// port allocations on the bound port, because without the userland proxy we
+// use iptables rules rather than net.Listen
+type dummyProxy struct {
+ listener io.Closer
+ addr net.Addr
+}
+
+func newDummyProxy(proto string, hostIP net.IP, hostPort int) userlandProxy {
+ switch proto {
+ case "tcp":
+ addr := &net.TCPAddr{IP: hostIP, Port: hostPort}
+ return &dummyProxy{addr: addr}
+ case "udp":
+ addr := &net.UDPAddr{IP: hostIP, Port: hostPort}
+ return &dummyProxy{addr: addr}
+ }
+ return nil
+}
+
+func (p *dummyProxy) Start() error {
+ switch addr := p.addr.(type) {
+ case *net.TCPAddr:
+ l, err := net.ListenTCP("tcp", addr)
+ if err != nil {
+ return err
+ }
+ p.listener = l
+ case *net.UDPAddr:
+ l, err := net.ListenUDP("udp", addr)
+ if err != nil {
+ return err
+ }
+ p.listener = l
+ default:
+ return fmt.Errorf("Unknown addr type: %T", p.addr)
+ }
+ return nil
+}
+
+func (p *dummyProxy) Stop() error {
+ if p.listener != nil {
+ return p.listener.Close()
+ }
+ return nil
+}
diff --git a/pkg/resolvconf/README.md b/vendor/src/github.com/docker/libnetwork/resolvconf/README.md
similarity index 100%
rename from pkg/resolvconf/README.md
rename to vendor/src/github.com/docker/libnetwork/resolvconf/README.md
diff --git a/vendor/src/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go b/vendor/src/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go
new file mode 100644
index 0000000000..d581a1913d
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go
@@ -0,0 +1,17 @@
+package dns
+
+import (
+ "regexp"
+)
+
+// IPLocalhost is a regex pattern for the localhost IP address range.
+const IPLocalhost = `((127\.([0-9]{1,3}.){2}[0-9]{1,3})|(::1))`
+
+var localhostIPRegexp = regexp.MustCompile(IPLocalhost)
+
+// IsLocalhost returns true if ip matches the localhost IP regular expression.
+// Used for determining if nameserver settings are being passed which are
+// localhost addresses
+func IsLocalhost(ip string) bool {
+ return localhostIPRegexp.MatchString(ip)
+}
diff --git a/pkg/resolvconf/resolvconf.go b/vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf.go
similarity index 91%
rename from pkg/resolvconf/resolvconf.go
rename to vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf.go
index 5707b16b7f..ebe3b71aa4 100644
--- a/pkg/resolvconf/resolvconf.go
+++ b/vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf.go
@@ -10,6 +10,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/libnetwork/resolvconf/dns"
)
var (
@@ -24,10 +25,8 @@ var (
// For readability and sufficiency for Docker purposes this seemed more reasonable than a
// 1000+ character regexp with exact and complete IPv6 validation
ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})`
- ipLocalhost = `((127\.([0-9]{1,3}.){2}[0-9]{1,3})|(::1))`
- localhostIPRegexp = regexp.MustCompile(ipLocalhost)
- localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipLocalhost + `\s*\n*`)
+ localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + dns.IPLocalhost + `\s*\n*`)
nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`)
nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`)
searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`)
@@ -81,7 +80,7 @@ func GetLastModified() ([]byte, string) {
return lastModified.contents, lastModified.sha256
}
-// FilterResolvDns cleans up the config in resolvConf. It has two main jobs:
+// FilterResolvDNS cleans up the config in resolvConf. It has two main jobs:
// 1. It looks for localhost (127.*|::1) entries in the provided
// resolv.conf, removing local nameserver entries, and, if the resulting
// cleaned config has no defined nameservers left, adds default DNS entries
@@ -89,7 +88,7 @@ func GetLastModified() ([]byte, string) {
// code will remove all IPv6 nameservers if it is not enabled for containers
//
// It returns a boolean to notify the caller if changes were made at all
-func FilterResolvDns(resolvConf []byte, ipv6Enabled bool) ([]byte, bool) {
+func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) ([]byte, bool) {
changed := false
cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{})
// if IPv6 is not enabled, also clean out any IPv6 address nameserver
@@ -128,13 +127,6 @@ func getLines(input []byte, commentMarker []byte) [][]byte {
return output
}
-// IsLocalhost returns true if ip matches the localhost IP regular expression.
-// Used for determining if nameserver settings are being passed which are
-// localhost addresses
-func IsLocalhost(ip string) bool {
- return localhostIPRegexp.MatchString(ip)
-}
-
// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf
func GetNameservers(resolvConf []byte) []string {
nameservers := []string{}
diff --git a/pkg/resolvconf/resolvconf_test.go b/vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf_test.go
similarity index 91%
rename from pkg/resolvconf/resolvconf_test.go
rename to vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf_test.go
index b0647e7833..a21c7afb3e 100644
--- a/pkg/resolvconf/resolvconf_test.go
+++ b/vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf_test.go
@@ -5,6 +5,8 @@ import (
"io/ioutil"
"os"
"testing"
+
+ _ "github.com/docker/libnetwork/netutils"
)
func TestGet(t *testing.T) {
@@ -160,42 +162,42 @@ func TestBuildWithZeroLengthDomainSearch(t *testing.T) {
func TestFilterResolvDns(t *testing.T) {
ns0 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\n"
- if result, _ := FilterResolvDns([]byte(ns0), false); result != nil {
+ if result, _ := FilterResolvDNS([]byte(ns0), false); result != nil {
if ns0 != string(result) {
t.Fatalf("Failed No Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
}
}
ns1 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\nnameserver 127.0.0.1\n"
- if result, _ := FilterResolvDns([]byte(ns1), false); result != nil {
+ if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil {
if ns0 != string(result) {
t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
}
}
ns1 = "nameserver 10.16.60.14\nnameserver 127.0.0.1\nnameserver 10.16.60.21\n"
- if result, _ := FilterResolvDns([]byte(ns1), false); result != nil {
+ if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil {
if ns0 != string(result) {
t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
}
}
ns1 = "nameserver 127.0.1.1\nnameserver 10.16.60.14\nnameserver 10.16.60.21\n"
- if result, _ := FilterResolvDns([]byte(ns1), false); result != nil {
+ if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil {
if ns0 != string(result) {
t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
}
}
ns1 = "nameserver ::1\nnameserver 10.16.60.14\nnameserver 127.0.2.1\nnameserver 10.16.60.21\n"
- if result, _ := FilterResolvDns([]byte(ns1), false); result != nil {
+ if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil {
if ns0 != string(result) {
t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
}
}
ns1 = "nameserver 10.16.60.14\nnameserver ::1\nnameserver 10.16.60.21\nnameserver ::1"
- if result, _ := FilterResolvDns([]byte(ns1), false); result != nil {
+ if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil {
if ns0 != string(result) {
t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
}
@@ -203,7 +205,7 @@ func TestFilterResolvDns(t *testing.T) {
// with IPv6 disabled (false param), the IPv6 nameserver should be removed
ns1 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\nnameserver ::1"
- if result, _ := FilterResolvDns([]byte(ns1), false); result != nil {
+ if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil {
if ns0 != string(result) {
t.Fatalf("Failed Localhost+IPv6 off: expected \n<%s> got \n<%s>", ns0, string(result))
}
@@ -212,7 +214,7 @@ func TestFilterResolvDns(t *testing.T) {
// with IPv6 enabled, the IPv6 nameserver should be preserved
ns0 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\n"
ns1 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\nnameserver ::1"
- if result, _ := FilterResolvDns([]byte(ns1), true); result != nil {
+ if result, _ := FilterResolvDNS([]byte(ns1), true); result != nil {
if ns0 != string(result) {
t.Fatalf("Failed Localhost+IPv6 on: expected \n<%s> got \n<%s>", ns0, string(result))
}
@@ -221,7 +223,7 @@ func TestFilterResolvDns(t *testing.T) {
// with IPv6 enabled, and no non-localhost servers, Google defaults (both IPv4+IPv6) should be added
ns0 = "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\nnameserver 2001:4860:4860::8888\nnameserver 2001:4860:4860::8844"
ns1 = "nameserver 127.0.0.1\nnameserver ::1\nnameserver 127.0.2.1"
- if result, _ := FilterResolvDns([]byte(ns1), true); result != nil {
+ if result, _ := FilterResolvDNS([]byte(ns1), true); result != nil {
if ns0 != string(result) {
t.Fatalf("Failed no Localhost+IPv6 enabled: expected \n<%s> got \n<%s>", ns0, string(result))
}
@@ -230,7 +232,7 @@ func TestFilterResolvDns(t *testing.T) {
// with IPv6 disabled, and no non-localhost servers, Google defaults (only IPv4) should be added
ns0 = "\nnameserver 8.8.8.8\nnameserver 8.8.4.4"
ns1 = "nameserver 127.0.0.1\nnameserver ::1\nnameserver 127.0.2.1"
- if result, _ := FilterResolvDns([]byte(ns1), false); result != nil {
+ if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil {
if ns0 != string(result) {
t.Fatalf("Failed no Localhost+IPv6 enabled: expected \n<%s> got \n<%s>", ns0, string(result))
}
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox/configure_linux.go b/vendor/src/github.com/docker/libnetwork/sandbox/configure_linux.go
new file mode 100644
index 0000000000..cae77890fd
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/sandbox/configure_linux.go
@@ -0,0 +1,81 @@
+package sandbox
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "runtime"
+
+ "github.com/vishvananda/netlink"
+ "github.com/vishvananda/netns"
+)
+
+func configureInterface(iface netlink.Link, settings *Interface) error {
+ ifaceName := iface.Attrs().Name
+ ifaceConfigurators := []struct {
+ Fn func(netlink.Link, *Interface) error
+ ErrMessage string
+ }{
+ {setInterfaceName, fmt.Sprintf("error renaming interface %q to %q", ifaceName, settings.DstName)},
+ {setInterfaceIP, fmt.Sprintf("error setting interface %q IP to %q", ifaceName, settings.Address)},
+ {setInterfaceIPv6, fmt.Sprintf("error setting interface %q IPv6 to %q", ifaceName, settings.AddressIPv6)},
+ }
+
+ for _, config := range ifaceConfigurators {
+ if err := config.Fn(iface, settings); err != nil {
+ return fmt.Errorf("%s: %v", config.ErrMessage, err)
+ }
+ }
+ return nil
+}
+
+func programGateway(path string, gw net.IP) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ origns, err := netns.Get()
+ if err != nil {
+ return err
+ }
+ defer origns.Close()
+
+ f, err := os.OpenFile(path, os.O_RDONLY, 0)
+ if err != nil {
+ return fmt.Errorf("failed get network namespace %q: %v", path, err)
+ }
+ defer f.Close()
+
+ nsFD := f.Fd()
+ if err = netns.Set(netns.NsHandle(nsFD)); err != nil {
+ return err
+ }
+ defer netns.Set(origns)
+
+ gwRoutes, err := netlink.RouteGet(gw)
+ if err != nil {
+ return fmt.Errorf("route for the gateway could not be found: %v", err)
+ }
+
+ return netlink.RouteAdd(&netlink.Route{
+ Scope: netlink.SCOPE_UNIVERSE,
+ LinkIndex: gwRoutes[0].LinkIndex,
+ Gw: gw,
+ })
+}
+
+func setInterfaceIP(iface netlink.Link, settings *Interface) error {
+ ipAddr := &netlink.Addr{IPNet: settings.Address, Label: ""}
+ return netlink.AddrAdd(iface, ipAddr)
+}
+
+func setInterfaceIPv6(iface netlink.Link, settings *Interface) error {
+ if settings.AddressIPv6 == nil {
+ return nil
+ }
+ ipAddr := &netlink.Addr{IPNet: settings.AddressIPv6, Label: ""}
+ return netlink.AddrAdd(iface, ipAddr)
+}
+
+func setInterfaceName(iface netlink.Link, settings *Interface) error {
+ return netlink.LinkSetName(iface, settings.DstName)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox/namespace_linux.go b/vendor/src/github.com/docker/libnetwork/sandbox/namespace_linux.go
new file mode 100644
index 0000000000..17881f1404
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/sandbox/namespace_linux.go
@@ -0,0 +1,273 @@
+package sandbox
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "runtime"
+ "sync"
+ "syscall"
+
+ "github.com/vishvananda/netlink"
+ "github.com/vishvananda/netns"
+)
+
+const prefix = "/var/run/docker/netns"
+
+var once sync.Once
+
+// The networkNamespace type is the linux implementation of the Sandbox
+// interface. It represents a linux network namespace, and moves an interface
+// into it when called on method AddInterface or sets the gateway etc.
+type networkNamespace struct {
+ path string
+ sinfo *Info
+ nextIfIndex int
+ sync.Mutex
+}
+
+func createBasePath() {
+ err := os.MkdirAll(prefix, 0644)
+ if err != nil && !os.IsExist(err) {
+ panic("Could not create net namespace path directory")
+ }
+}
+
+// GenerateKey generates a sandbox key based on the passed
+// container id.
+func GenerateKey(containerID string) string {
+ maxLen := 12
+ if len(containerID) < maxLen {
+ maxLen = len(containerID)
+ }
+
+ return prefix + "/" + containerID[:maxLen]
+}
+
+// NewSandbox provides a new sandbox instance created in an os specific way
+// provided a key which uniquely identifies the sandbox
+func NewSandbox(key string, osCreate bool) (Sandbox, error) {
+ info, err := createNetworkNamespace(key, osCreate)
+ if err != nil {
+ return nil, err
+ }
+
+ return &networkNamespace{path: key, sinfo: info}, nil
+}
+
+func createNetworkNamespace(path string, osCreate bool) (*Info, error) {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ origns, err := netns.Get()
+ if err != nil {
+ return nil, err
+ }
+ defer origns.Close()
+
+ if err := createNamespaceFile(path); err != nil {
+ return nil, err
+ }
+
+ if osCreate {
+ defer netns.Set(origns)
+ newns, err := netns.New()
+ if err != nil {
+ return nil, err
+ }
+ defer newns.Close()
+
+ if err := loopbackUp(); err != nil {
+ return nil, err
+ }
+ }
+
+ procNet := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), syscall.Gettid())
+
+ if err := syscall.Mount(procNet, path, "bind", syscall.MS_BIND, ""); err != nil {
+ return nil, err
+ }
+
+ interfaces := []*Interface{}
+ info := &Info{Interfaces: interfaces}
+ return info, nil
+}
+
+func cleanupNamespaceFile(path string) {
+ if _, err := os.Stat(path); err == nil {
+ n := &networkNamespace{path: path}
+ n.Destroy()
+ }
+}
+
+func createNamespaceFile(path string) (err error) {
+ var f *os.File
+
+ once.Do(createBasePath)
+ // cleanup namespace file if it already exists because of a previous ungraceful exit.
+ cleanupNamespaceFile(path)
+ if f, err = os.Create(path); err == nil {
+ f.Close()
+ }
+ return err
+}
+
+func loopbackUp() error {
+ iface, err := netlink.LinkByName("lo")
+ if err != nil {
+ return err
+ }
+ return netlink.LinkSetUp(iface)
+}
+
+func (n *networkNamespace) RemoveInterface(i *Interface) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ origns, err := netns.Get()
+ if err != nil {
+ return err
+ }
+ defer origns.Close()
+
+ f, err := os.OpenFile(n.path, os.O_RDONLY, 0)
+ if err != nil {
+ return fmt.Errorf("failed get network namespace %q: %v", n.path, err)
+ }
+ defer f.Close()
+
+ nsFD := f.Fd()
+ if err = netns.Set(netns.NsHandle(nsFD)); err != nil {
+ return err
+ }
+ defer netns.Set(origns)
+
+ // Find the network interface identified by the DstName attribute.
+ iface, err := netlink.LinkByName(i.DstName)
+ if err != nil {
+ return err
+ }
+
+ // Down the interface before configuring
+ if err := netlink.LinkSetDown(iface); err != nil {
+ return err
+ }
+
+ err = netlink.LinkSetName(iface, i.SrcName)
+ if err != nil {
+ fmt.Println("LinkSetName failed: ", err)
+ return err
+ }
+
+ // Move the network interface to caller namespace.
+ if err := netlink.LinkSetNsFd(iface, int(origns)); err != nil {
+ fmt.Println("LinkSetNsPid failed: ", err)
+ return err
+ }
+
+ return nil
+}
+
+func (n *networkNamespace) AddInterface(i *Interface) error {
+ n.Lock()
+ i.DstName = fmt.Sprintf("%s%d", i.DstName, n.nextIfIndex)
+ n.nextIfIndex++
+ n.Unlock()
+
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ origns, err := netns.Get()
+ if err != nil {
+ return err
+ }
+ defer origns.Close()
+
+ f, err := os.OpenFile(n.path, os.O_RDONLY, 0)
+ if err != nil {
+ return fmt.Errorf("failed get network namespace %q: %v", n.path, err)
+ }
+ defer f.Close()
+
+ // Find the network interface identified by the SrcName attribute.
+ iface, err := netlink.LinkByName(i.SrcName)
+ if err != nil {
+ return err
+ }
+
+ // Move the network interface to the destination namespace.
+ nsFD := f.Fd()
+ if err := netlink.LinkSetNsFd(iface, int(nsFD)); err != nil {
+ return err
+ }
+
+ if err = netns.Set(netns.NsHandle(nsFD)); err != nil {
+ return err
+ }
+ defer netns.Set(origns)
+
+ // Down the interface before configuring
+ if err := netlink.LinkSetDown(iface); err != nil {
+ return err
+ }
+
+ // Configure the interface now this is moved in the proper namespace.
+ if err := configureInterface(iface, i); err != nil {
+ return err
+ }
+
+ // Up the interface.
+ if err := netlink.LinkSetUp(iface); err != nil {
+ return err
+ }
+
+ n.Lock()
+ n.sinfo.Interfaces = append(n.sinfo.Interfaces, i)
+ n.Unlock()
+
+ return nil
+}
+
+func (n *networkNamespace) SetGateway(gw net.IP) error {
+ if len(gw) == 0 {
+ return nil
+ }
+
+ err := programGateway(n.path, gw)
+ if err == nil {
+ n.sinfo.Gateway = gw
+ }
+
+ return err
+}
+
+func (n *networkNamespace) SetGatewayIPv6(gw net.IP) error {
+ if len(gw) == 0 {
+ return nil
+ }
+
+ err := programGateway(n.path, gw)
+ if err == nil {
+ n.sinfo.GatewayIPv6 = gw
+ }
+
+ return err
+}
+
+func (n *networkNamespace) Interfaces() []*Interface {
+ return n.sinfo.Interfaces
+}
+
+func (n *networkNamespace) Key() string {
+ return n.path
+}
+
+func (n *networkNamespace) Destroy() error {
+ // Assuming no running process is executing in this network namespace,
+ // unmounting is sufficient to destroy it.
+ if err := syscall.Unmount(n.path, syscall.MNT_DETACH); err != nil {
+ return err
+ }
+
+ return os.Remove(n.path)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox/sandbox.go b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox.go
new file mode 100644
index 0000000000..9e104cabdc
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox.go
@@ -0,0 +1,159 @@
+package sandbox
+
+import (
+ "net"
+
+ "github.com/docker/libnetwork/types"
+)
+
+// Sandbox represents a network sandbox, identified by a specific key. It
+// holds a list of Interfaces, routes etc, and more can be added dynamically.
+type Sandbox interface {
+ // The path where the network namespace is mounted.
+ Key() string
+
+ // The collection of Interface previously added with the AddInterface
+ // method. Note that this doesn't incude network interfaces added in any
+ // other way (such as the default loopback interface which are automatically
+ // created on creation of a sandbox).
+ Interfaces() []*Interface
+
+ // Add an existing Interface to this sandbox. The operation will rename
+ // from the Interface SrcName to DstName as it moves, and reconfigure the
+ // interface according to the specified settings. The caller is expected
+ // to only provide a prefix for DstName. The AddInterface api will auto-generate
+ // an appropriate suffix for the DstName to disambiguate.
+ AddInterface(*Interface) error
+
+ // Remove an interface from the sandbox by renaming it to its original
+ // name and moving it out of the sandbox.
+ RemoveInterface(*Interface) error
+
+ // Set default IPv4 gateway for the sandbox
+ SetGateway(gw net.IP) error
+
+ // Set default IPv6 gateway for the sandbox
+ SetGatewayIPv6(gw net.IP) error
+
+ // Destroy the sandbox
+ Destroy() error
+}
+
+// Info represents all possible information that
+// the driver wants to place in the sandbox which includes
+// interfaces, routes and gateway
+type Info struct {
+ Interfaces []*Interface
+
+ // IPv4 gateway for the sandbox.
+ Gateway net.IP
+
+ // IPv6 gateway for the sandbox.
+ GatewayIPv6 net.IP
+
+ // TODO: Add routes and ip tables etc.
+}
+
+// Interface represents the settings and identity of a network device. It is
+// used as a return type for Network.Link, and it is common practice for the
+// caller to use this information when moving interface SrcName from host
+// namespace to DstName in a different net namespace with the appropriate
+// network settings.
+type Interface struct {
+ // The name of the interface in the origin network namespace.
+ SrcName string
+
+ // The name that will be assigned to the interface once it moves inside
+ // a network namespace. When the caller passes in a DstName, it is only
+ // expected to pass a prefix. The name will be modified with an
+ // appropriately auto-generated suffix.
+ DstName string
+
+ // IPv4 address for the interface.
+ Address *net.IPNet
+
+ // IPv6 address for the interface.
+ AddressIPv6 *net.IPNet
+}
+
+// GetCopy returns a copy of this Interface structure
+func (i *Interface) GetCopy() *Interface {
+ return &Interface{
+ SrcName: i.SrcName,
+ DstName: i.DstName,
+ Address: types.GetIPNetCopy(i.Address),
+ AddressIPv6: types.GetIPNetCopy(i.AddressIPv6),
+ }
+}
+
+// Equal checks if this instance of Interface is equal to the passed one
+func (i *Interface) Equal(o *Interface) bool {
+ if i == o {
+ return true
+ }
+
+ if o == nil {
+ return false
+ }
+
+ if i.SrcName != o.SrcName || i.DstName != o.DstName {
+ return false
+ }
+
+ if !types.CompareIPNet(i.Address, o.Address) {
+ return false
+ }
+
+ if !types.CompareIPNet(i.AddressIPv6, o.AddressIPv6) {
+ return false
+ }
+
+ return true
+}
+
+// GetCopy returns a copy of this SandboxInfo structure
+func (s *Info) GetCopy() *Info {
+ list := make([]*Interface, len(s.Interfaces))
+ for i, iface := range s.Interfaces {
+ list[i] = iface.GetCopy()
+ }
+ gw := types.GetIPCopy(s.Gateway)
+ gw6 := types.GetIPCopy(s.GatewayIPv6)
+
+ return &Info{Interfaces: list, Gateway: gw, GatewayIPv6: gw6}
+}
+
+// Equal checks if this instance of SandboxInfo is equal to the passed one
+func (s *Info) Equal(o *Info) bool {
+ if s == o {
+ return true
+ }
+
+ if o == nil {
+ return false
+ }
+
+ if !s.Gateway.Equal(o.Gateway) {
+ return false
+ }
+
+ if !s.GatewayIPv6.Equal(o.GatewayIPv6) {
+ return false
+ }
+
+ if (s.Interfaces == nil && o.Interfaces != nil) ||
+ (s.Interfaces != nil && o.Interfaces == nil) ||
+ (len(s.Interfaces) != len(o.Interfaces)) {
+ return false
+ }
+
+ // Note: At the moment, the two lists must be in the same order
+ for i := 0; i < len(s.Interfaces); i++ {
+ if !s.Interfaces[i].Equal(o.Interfaces[i]) {
+ return false
+ }
+ }
+
+ return true
+
+}
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_linux_test.go b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_linux_test.go
new file mode 100644
index 0000000000..d4af061f91
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_linux_test.go
@@ -0,0 +1,139 @@
+package sandbox
+
+import (
+ "net"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+
+ "github.com/docker/libnetwork/netutils"
+ "github.com/vishvananda/netlink"
+ "github.com/vishvananda/netns"
+)
+
+const (
+ vethName1 = "wierdlongname1"
+ vethName2 = "wierdlongname2"
+ vethName3 = "wierdlongname3"
+ vethName4 = "wierdlongname4"
+ sboxIfaceName = "containername"
+)
+
+func newKey(t *testing.T) (string, error) {
+ name, err := netutils.GenerateRandomName("netns", 12)
+ if err != nil {
+ return "", err
+ }
+
+ name = filepath.Join("/tmp", name)
+ if _, err := os.Create(name); err != nil {
+ return "", err
+ }
+
+ return name, nil
+}
+
+func newInfo(t *testing.T) (*Info, error) {
+ veth := &netlink.Veth{
+ LinkAttrs: netlink.LinkAttrs{Name: vethName1, TxQLen: 0},
+ PeerName: vethName2}
+ if err := netlink.LinkAdd(veth); err != nil {
+ return nil, err
+ }
+
+ // Store the sandbox side pipe interface
+ // This is needed for cleanup on DeleteEndpoint()
+ intf1 := &Interface{}
+ intf1.SrcName = vethName2
+ intf1.DstName = sboxIfaceName
+
+ ip4, addr, err := net.ParseCIDR("192.168.1.100/24")
+ if err != nil {
+ return nil, err
+ }
+ intf1.Address = addr
+ intf1.Address.IP = ip4
+
+ // ip6, addrv6, err := net.ParseCIDR("2001:DB8::ABCD/48")
+ ip6, addrv6, err := net.ParseCIDR("fe80::2/64")
+ if err != nil {
+ return nil, err
+ }
+ intf1.AddressIPv6 = addrv6
+ intf1.AddressIPv6.IP = ip6
+
+ veth = &netlink.Veth{
+ LinkAttrs: netlink.LinkAttrs{Name: vethName3, TxQLen: 0},
+ PeerName: vethName4}
+
+ if err := netlink.LinkAdd(veth); err != nil {
+ return nil, err
+ }
+
+ intf2 := &Interface{}
+ intf2.SrcName = vethName4
+ intf2.DstName = sboxIfaceName
+
+ ip4, addr, err = net.ParseCIDR("192.168.2.100/24")
+ if err != nil {
+ return nil, err
+ }
+ intf2.Address = addr
+ intf2.Address.IP = ip4
+
+ // ip6, addrv6, err := net.ParseCIDR("2001:DB8::ABCD/48")
+ ip6, addrv6, err = net.ParseCIDR("fe80::3/64")
+ if err != nil {
+ return nil, err
+ }
+ intf2.AddressIPv6 = addrv6
+ intf2.AddressIPv6.IP = ip6
+
+ sinfo := &Info{Interfaces: []*Interface{intf1, intf2}}
+ sinfo.Gateway = net.ParseIP("192.168.1.1")
+ // sinfo.GatewayIPv6 = net.ParseIP("2001:DB8::1")
+ sinfo.GatewayIPv6 = net.ParseIP("fe80::1")
+
+ return sinfo, nil
+}
+
+func verifySandbox(t *testing.T, s Sandbox) {
+ _, ok := s.(*networkNamespace)
+ if !ok {
+ t.Fatalf("The sandox interface returned is not of type networkNamespace")
+ }
+
+ origns, err := netns.Get()
+ if err != nil {
+ t.Fatalf("Could not get the current netns: %v", err)
+ }
+ defer origns.Close()
+
+ f, err := os.OpenFile(s.Key(), os.O_RDONLY, 0)
+ if err != nil {
+ t.Fatalf("Failed top open network namespace path %q: %v", s.Key(), err)
+ }
+ defer f.Close()
+
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ nsFD := f.Fd()
+ if err = netns.Set(netns.NsHandle(nsFD)); err != nil {
+ t.Fatalf("Setting to the namespace pointed to by the sandbox %s failed: %v", s.Key(), err)
+ }
+ defer netns.Set(origns)
+
+ _, err = netlink.LinkByName(sboxIfaceName + "0")
+ if err != nil {
+ t.Fatalf("Could not find the interface %s inside the sandbox: %v", sboxIfaceName,
+ err)
+ }
+
+ _, err = netlink.LinkByName(sboxIfaceName + "1")
+ if err != nil {
+ t.Fatalf("Could not find the interface %s inside the sandbox: %v", sboxIfaceName,
+ err)
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_test.go b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_test.go
new file mode 100644
index 0000000000..811af6d916
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_test.go
@@ -0,0 +1,149 @@
+package sandbox
+
+import (
+ "net"
+ "testing"
+)
+
+func TestSandboxCreate(t *testing.T) {
+ key, err := newKey(t)
+ if err != nil {
+ t.Fatalf("Failed to obtain a key: %v", err)
+ }
+
+ s, err := NewSandbox(key, true)
+ if err != nil {
+ t.Fatalf("Failed to create a new sandbox: %v", err)
+ }
+
+ if s.Key() != key {
+ t.Fatalf("s.Key() returned %s. Expected %s", s.Key(), key)
+ }
+
+ info, err := newInfo(t)
+ if err != nil {
+ t.Fatalf("Failed to generate new sandbox info: %v", err)
+ }
+
+ for _, i := range info.Interfaces {
+ err = s.AddInterface(i)
+ if err != nil {
+ t.Fatalf("Failed to add interfaces to sandbox: %v", err)
+ }
+ }
+
+ err = s.SetGateway(info.Gateway)
+ if err != nil {
+ t.Fatalf("Failed to set gateway to sandbox: %v", err)
+ }
+
+ err = s.SetGatewayIPv6(info.GatewayIPv6)
+ if err != nil {
+ t.Fatalf("Failed to set ipv6 gateway to sandbox: %v", err)
+ }
+
+ verifySandbox(t, s)
+ s.Destroy()
+}
+
+func TestSandboxCreateTwice(t *testing.T) {
+ key, err := newKey(t)
+ if err != nil {
+ t.Fatalf("Failed to obtain a key: %v", err)
+ }
+
+ _, err = NewSandbox(key, true)
+ if err != nil {
+ t.Fatalf("Failed to create a new sandbox: %v", err)
+ }
+
+ // Create another sandbox with the same key to see if we handle it
+ // gracefully.
+ s, err := NewSandbox(key, true)
+ if err != nil {
+ t.Fatalf("Failed to create a new sandbox: %v", err)
+ }
+ s.Destroy()
+}
+
+func TestInterfaceEqual(t *testing.T) {
+ list := getInterfaceList()
+
+ if !list[0].Equal(list[0]) {
+ t.Fatalf("Interface.Equal() returned false negative")
+ }
+
+ if list[0].Equal(list[1]) {
+ t.Fatalf("Interface.Equal() returned false positive")
+ }
+
+ if list[0].Equal(list[1]) != list[1].Equal(list[0]) {
+ t.Fatalf("Interface.Equal() failed commutative check")
+ }
+}
+
+func TestSandboxInfoEqual(t *testing.T) {
+ si1 := &Info{Interfaces: getInterfaceList(), Gateway: net.ParseIP("192.168.1.254"), GatewayIPv6: net.ParseIP("2001:2345::abcd:8889")}
+ si2 := &Info{Interfaces: getInterfaceList(), Gateway: net.ParseIP("172.18.255.254"), GatewayIPv6: net.ParseIP("2001:2345::abcd:8888")}
+
+ if !si1.Equal(si1) {
+ t.Fatalf("Info.Equal() returned false negative")
+ }
+
+ if si1.Equal(si2) {
+ t.Fatalf("Info.Equal() returned false positive")
+ }
+
+ if si1.Equal(si2) != si2.Equal(si1) {
+ t.Fatalf("Info.Equal() failed commutative check")
+ }
+}
+
+func TestInterfaceCopy(t *testing.T) {
+ for _, iface := range getInterfaceList() {
+ cp := iface.GetCopy()
+
+ if !iface.Equal(cp) {
+ t.Fatalf("Failed to return a copy of Interface")
+ }
+
+ if iface == cp {
+ t.Fatalf("Failed to return a true copy of Interface")
+ }
+ }
+}
+
+func TestSandboxInfoCopy(t *testing.T) {
+ si := Info{Interfaces: getInterfaceList(), Gateway: net.ParseIP("192.168.1.254"), GatewayIPv6: net.ParseIP("2001:2345::abcd:8889")}
+ cp := si.GetCopy()
+
+ if !si.Equal(cp) {
+ t.Fatalf("Failed to return a copy of Info")
+ }
+
+ if &si == cp {
+ t.Fatalf("Failed to return a true copy of Info")
+ }
+}
+
+func getInterfaceList() []*Interface {
+ _, netv4a, _ := net.ParseCIDR("192.168.30.1/24")
+ _, netv4b, _ := net.ParseCIDR("172.18.255.2/23")
+ _, netv6a, _ := net.ParseCIDR("2001:2345::abcd:8888/80")
+ _, netv6b, _ := net.ParseCIDR("2001:2345::abcd:8889/80")
+
+ return []*Interface{
+ &Interface{
+ SrcName: "veth1234567",
+ DstName: "eth0",
+ Address: netv4a,
+ AddressIPv6: netv6a,
+ },
+ &Interface{
+ SrcName: "veth7654321",
+ DstName: "eth1",
+ Address: netv4b,
+ AddressIPv6: netv6b,
+ },
+ }
+}
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_unsupported.go b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_unsupported.go
new file mode 100644
index 0000000000..aa116fda07
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux
+
+package sandbox
+
+import "errors"
+
+var (
+ ErrNotImplemented = errors.New("not implemented")
+)
+
+// NewSandbox provides a new sandbox instance created in an os specific way
+// provided a key which uniquely identifies the sandbox
+func NewSandbox(key string) (Sandbox, error) {
+ return nil, ErrNotImplemented
+}
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_unsupported_test.go b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_unsupported_test.go
new file mode 100644
index 0000000000..48dc2aa726
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_unsupported_test.go
@@ -0,0 +1,20 @@
+// +build !linux
+
+package sandbox
+
+import "testing"
+
+// ErrNotImplemented is already declared in sandbox_unsupported.go, which is
+// compiled into this same package under the same !linux build tag, so it
+// must not be redeclared here (doing so is a compile-time error).
+
+// newKey is a stub on unsupported platforms. The string zero value is
+// returned because a nil literal is not assignable to a string result.
+func newKey(t *testing.T) (string, error) {
+	return "", ErrNotImplemented
+}
+
+// verifySandbox is a no-op on unsupported platforms.
+func verifySandbox(t *testing.T, s Sandbox) {
+	return
+}
diff --git a/vendor/src/github.com/docker/libnetwork/system.go b/vendor/src/github.com/docker/libnetwork/system.go
new file mode 100644
index 0000000000..7beec2876a
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/system.go
@@ -0,0 +1,34 @@
+package libnetwork
+
+import (
+ "fmt"
+ "runtime"
+ "syscall"
+)
+
+// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092
+//
+// We need different setns values for the different platforms and arch
+// We are declaring the macro here because the SETNS syscall does not exist in the stdlib
+var setNsMap = map[string]uintptr{
+ "linux/386": 346,
+ "linux/amd64": 308,
+ "linux/arm": 374,
+ "linux/ppc64": 350,
+ "linux/ppc64le": 350,
+ "linux/s390x": 339,
+}
+
+func setns(fd uintptr, flags uintptr) error {
+ ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
+ if !exists {
+ return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
+ }
+
+ _, _, err := syscall.RawSyscall(ns, fd, flags, 0)
+ if err != 0 {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/test/integration/README.md b/vendor/src/github.com/docker/libnetwork/test/integration/README.md
new file mode 100644
index 0000000000..777b1cfa46
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/test/integration/README.md
@@ -0,0 +1,34 @@
+# LibNetwork Integration Tests
+
+Integration tests provide end-to-end testing of LibNetwork and Drivers.
+
+While unit tests verify the code is working as expected by relying on mocks and
+artificially created fixtures, integration tests actually use real docker
+engines and communicate with them through the CLI.
+
+Note that integration tests do **not** replace unit tests and Docker is used as a good use-case.
+
+As a rule of thumb, code should be tested thoroughly with unit tests.
+Integration tests on the other hand are meant to test a specific feature end to end.
+
+Integration tests are written in *bash* using the
+[bats](https://github.com/sstephenson/bats) framework.
+
+## Pre-Requisites
+
+1. Bats (https://github.com/sstephenson/bats#installing-bats-from-source)
+2. Docker Machine (https://github.com/docker/machine)
+3. Virtualbox (as a Docker machine driver)
+
+## Running integration tests
+
+* Start by [installing](https://github.com/sstephenson/bats#installing-bats-from-source) *bats* on your system.
+* If not done already, [install](https://docs.docker.com/machine/) *docker-machine* into /usr/bin
+* Make sure Virtualbox is installed as well, which will be used by docker-machine as a driver to launch VMs
+
+In order to run all integration tests, pass *bats* the test path:
+```
+$ bats test/integration/daemon-configs.bats
+```
+
+
diff --git a/vendor/src/github.com/docker/libnetwork/test/integration/daemon-configs.bats b/vendor/src/github.com/docker/libnetwork/test/integration/daemon-configs.bats
new file mode 100644
index 0000000000..fd48fbe199
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/test/integration/daemon-configs.bats
@@ -0,0 +1,104 @@
+#!/usr/bin/env bats
+
+load helpers
+
+export DRIVER=virtualbox
+export NAME="bats-$DRIVER-daemon-configs"
+export MACHINE_STORAGE_PATH=/tmp/machine-bats-daemon-test-$DRIVER
+# Default memsize is 1024MB and disksize is 20000MB
+# These values are defined in drivers/virtualbox/virtualbox.go
+export DEFAULT_MEMSIZE=1024
+export DEFAULT_DISKSIZE=20000
+export CUSTOM_MEMSIZE=1536
+export CUSTOM_DISKSIZE=10000
+export CUSTOM_CPUCOUNT=1
+export BAD_URL="http://dev.null:9111/bad.iso"
+
+function setup() {
+ # add sleep because vbox; ugh
+ sleep 1
+}
+
+findDiskSize() {
+  # SATA-0-0 is usually the boot2disk.iso image
+  # We assume that SATA 1-0 is root disk VMDK and grab this UUID
+  # e.g. "SATA-ImageUUID-1-0"="fb5f33a7-e4e3-4cb9-877c-f9415ae2adea"
+  # TODO(slashk): does this work on Windows ? (note: \$2 must be escaped so awk, not this shell, expands it)
+  run bash -c "VBoxManage showvminfo --machinereadable $NAME | grep SATA-ImageUUID-1-0 | cut -d'=' -f2"
+  run bash -c "VBoxManage showhdinfo $output | grep 'Capacity:' | awk -F' ' '{ print \$2 }'"
+}
+
+findMemorySize() {
+ run bash -c "VBoxManage showvminfo --machinereadable $NAME | grep memory= | cut -d'=' -f2"
+}
+
+findCPUCount() {
+ run bash -c "VBoxManage showvminfo --machinereadable $NAME | grep cpus= | cut -d'=' -f2"
+}
+
+buildMachineWithOldIsoCheckUpgrade() {
+ run wget https://github.com/boot2docker/boot2docker/releases/download/v1.4.1/boot2docker.iso -O $MACHINE_STORAGE_PATH/cache/boot2docker.iso
+ run machine create -d virtualbox $NAME
+ run machine upgrade $NAME
+}
+
+@test "$DRIVER: machine should not exist" {
+ run machine active $NAME
+ [ "$status" -eq 1 ]
+}
+
+@test "$DRIVER: VM should not exist" {
+ run VBoxManage showvminfo $NAME
+ [ "$status" -eq 1 ]
+}
+
+@test "$DRIVER: create" {
+ run machine create -d $DRIVER $NAME
+ [ "$status" -eq 0 ]
+}
+
+@test "$DRIVER: active" {
+ run machine active $NAME
+ [ "$status" -eq 0 ]
+}
+
+@test "$DRIVER: check default machine memory size" {
+ findMemorySize
+ [[ ${output} == "${DEFAULT_MEMSIZE}" ]]
+}
+
+@test "$DRIVER: check default machine disksize" {
+ findDiskSize
+ [[ ${output} == *"$DEFAULT_DISKSIZE"* ]]
+}
+
+@test "$DRIVER: test bridge-ip" {
+ run machine ssh $NAME sudo /etc/init.d/docker stop
+ run machine ssh $NAME sudo ifconfig docker0 down
+ run machine ssh $NAME sudo ip link delete docker0
+ BIP='--bip=172.168.45.1/24'
+ set_extra_config $BIP
+ cat ${TMP_EXTRA_ARGS_FILE} | machine ssh $NAME sudo tee /var/lib/boot2docker/profile
+ cat ${DAEMON_CFG_FILE} | machine ssh $NAME "sudo tee -a /var/lib/boot2docker/profile"
+ run machine ssh $NAME sudo /etc/init.d/docker start
+ run machine ssh $NAME ifconfig docker0
+ [ "$status" -eq 0 ]
+ [[ ${lines[1]} =~ "172.168.45.1" ]]
+}
+
+@test "$DRIVER: run busybox container" {
+ run machine ssh $NAME sudo cat /var/lib/boot2docker/profile
+ run docker $(machine config $NAME) run busybox echo hello world
+ [ "$status" -eq 0 ]
+}
+
+@test "$DRIVER: remove machine" {
+ run machine rm -f $NAME
+}
+
+# Cleanup of machine store should always be the last 'test'
+@test "$DRIVER: cleanup" {
+ run rm -rf $MACHINE_STORAGE_PATH
+ [ "$status" -eq 0 ]
+}
+
diff --git a/vendor/src/github.com/docker/libnetwork/test/integration/daemon.cfg b/vendor/src/github.com/docker/libnetwork/test/integration/daemon.cfg
new file mode 100644
index 0000000000..fc93dbd604
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/test/integration/daemon.cfg
@@ -0,0 +1,4 @@
+CACERT=/var/lib/boot2docker/ca.pem
+SERVERCERT=/var/lib/boot2docker/server.pem
+SERVERKEY=/var/lib/boot2docker/server-key.pem
+DOCKER_TLS=no
diff --git a/vendor/src/github.com/docker/libnetwork/test/integration/helpers.bash b/vendor/src/github.com/docker/libnetwork/test/integration/helpers.bash
new file mode 100644
index 0000000000..ec18e5d4eb
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/test/integration/helpers.bash
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+# Root directory of the repository.
+MACHINE_ROOT=/usr/bin
+
+PLATFORM=`uname -s | tr '[:upper:]' '[:lower:]'`
+ARCH=`uname -m`
+
+if [ "$ARCH" = "x86_64" ]; then
+ ARCH="amd64"
+else
+ ARCH="386"
+fi
+MACHINE_BIN_NAME=docker-machine_$PLATFORM-$ARCH
+BATS_LOG=/tmp/bats.log
+
+touch ${BATS_LOG}
+rm ${BATS_LOG}
+
+teardown() {
+ echo "$BATS_TEST_NAME
+----------
+$output
+----------
+
+" >> ${BATS_LOG}
+}
+
+EXTRA_ARGS_CFG='EXTRA_ARGS'
+EXTRA_ARGS='--tlsverify --tlscacert=/var/lib/boot2docker/ca.pem --tlskey=/var/lib/boot2docker/server-key.pem --tlscert=/var/lib/boot2docker/server.pem --label=provider=virtualbox -H tcp://0.0.0.0:2376'
+TMP_EXTRA_ARGS_FILE=/tmp/tmp_extra_args
+DAEMON_CFG_FILE=${BATS_TEST_DIRNAME}/daemon.cfg
+set_extra_config() {
+ if [ -f ${TMP_EXTRA_ARGS_FILE} ];
+ then
+ rm ${TMP_EXTRA_ARGS_FILE}
+ fi
+ echo -n "${EXTRA_ARGS_CFG}='" > ${TMP_EXTRA_ARGS_FILE}
+ echo -n "$1 " >> ${TMP_EXTRA_ARGS_FILE}
+ echo "${EXTRA_ARGS}'" >> ${TMP_EXTRA_ARGS_FILE}
+}
+
+if [ ! -e $MACHINE_ROOT/$MACHINE_BIN_NAME ]; then
+ echo "${MACHINE_ROOT}/${MACHINE_BIN_NAME} not found"
+ exit 1
+fi
+
+function machine() {
+ ${MACHINE_ROOT}/$MACHINE_BIN_NAME "$@"
+}
diff --git a/vendor/src/github.com/docker/libnetwork/types/types.go b/vendor/src/github.com/docker/libnetwork/types/types.go
new file mode 100644
index 0000000000..3b83485f75
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/types/types.go
@@ -0,0 +1,345 @@
+// Package types contains types that are common across libnetwork project
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "net"
+ "strings"
+)
+
+// UUID represents a globally unique ID of various resources like network and endpoint
+type UUID string
+
+// TransportPort represents a local Layer 4 endpoint
+type TransportPort struct {
+ Proto Protocol
+ Port uint16
+}
+
+// GetCopy returns a copy of this TransportPort structure instance
+func (t *TransportPort) GetCopy() TransportPort {
+ return TransportPort{Proto: t.Proto, Port: t.Port}
+}
+
+// PortBinding represents a port binding between the container and the host
+type PortBinding struct {
+ Proto Protocol
+ IP net.IP
+ Port uint16
+ HostIP net.IP
+ HostPort uint16
+}
+
+// HostAddr returns the host side transport address
+func (p PortBinding) HostAddr() (net.Addr, error) {
+ switch p.Proto {
+ case UDP:
+ return &net.UDPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil
+ case TCP:
+ return &net.TCPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil
+ default:
+ return nil, ErrInvalidProtocolBinding(p.Proto.String())
+ }
+}
+
+// ContainerAddr returns the container side transport address
+func (p PortBinding) ContainerAddr() (net.Addr, error) {
+ switch p.Proto {
+ case UDP:
+ return &net.UDPAddr{IP: p.IP, Port: int(p.Port)}, nil
+ case TCP:
+ return &net.TCPAddr{IP: p.IP, Port: int(p.Port)}, nil
+ default:
+ return nil, ErrInvalidProtocolBinding(p.Proto.String())
+ }
+}
+
+// GetCopy returns a copy of this PortBinding structure instance
+func (p *PortBinding) GetCopy() PortBinding {
+ return PortBinding{
+ Proto: p.Proto,
+ IP: GetIPCopy(p.IP),
+ Port: p.Port,
+ HostIP: GetIPCopy(p.HostIP),
+ HostPort: p.HostPort,
+ }
+}
+
+// Equal checks if this instance of PortBinding is equal to the passed one
+func (p *PortBinding) Equal(o *PortBinding) bool {
+ if p == o {
+ return true
+ }
+
+ if o == nil {
+ return false
+ }
+
+ if p.Proto != o.Proto || p.Port != o.Port || p.HostPort != o.HostPort {
+ return false
+ }
+
+ if p.IP != nil {
+ if !p.IP.Equal(o.IP) {
+ return false
+ }
+ } else {
+ if o.IP != nil {
+ return false
+ }
+ }
+
+ if p.HostIP != nil {
+ if !p.HostIP.Equal(o.HostIP) {
+ return false
+ }
+ } else {
+ if o.HostIP != nil {
+ return false
+ }
+ }
+
+ return true
+}
+
+// ErrInvalidProtocolBinding is returned when the port binding protocol is not valid.
+type ErrInvalidProtocolBinding string
+
+func (ipb ErrInvalidProtocolBinding) Error() string {
+ return fmt.Sprintf("invalid transport protocol: %s", string(ipb))
+}
+
+const (
+ // ICMP is for the ICMP ip protocol
+ ICMP = 1
+ // TCP is for the TCP ip protocol
+ TCP = 6
+ // UDP is for the UDP ip protocol
+ UDP = 17
+)
+
+// Protocol represents an IP protocol number
+type Protocol uint8
+
+func (p Protocol) String() string {
+ switch p {
+ case ICMP:
+ return "icmp"
+ case TCP:
+ return "tcp"
+ case UDP:
+ return "udp"
+ default:
+ return fmt.Sprintf("%d", p)
+ }
+}
+
+// ParseProtocol returns the respective Protocol type for the passed string
+func ParseProtocol(s string) Protocol {
+ switch strings.ToLower(s) {
+ case "icmp":
+ return ICMP
+ case "udp":
+ return UDP
+ case "tcp":
+ return TCP
+ default:
+ return 0
+ }
+}
+
+// GetMacCopy returns a copy of the passed MAC address
+func GetMacCopy(from net.HardwareAddr) net.HardwareAddr {
+ to := make(net.HardwareAddr, len(from))
+ copy(to, from)
+ return to
+}
+
+// GetIPCopy returns a copy of the passed IP address
+func GetIPCopy(from net.IP) net.IP {
+ to := make(net.IP, len(from))
+ copy(to, from)
+ return to
+}
+
+// GetIPNetCopy returns a copy of the passed IP Network
+func GetIPNetCopy(from *net.IPNet) *net.IPNet {
+ if from == nil {
+ return nil
+ }
+ bm := make(net.IPMask, len(from.Mask))
+ copy(bm, from.Mask)
+ return &net.IPNet{IP: GetIPCopy(from.IP), Mask: bm}
+}
+
+// CompareIPNet returns equal if the two IP Networks are equal
+func CompareIPNet(a, b *net.IPNet) bool {
+ if a == b {
+ return true
+ }
+ if a == nil || b == nil {
+ return false
+ }
+ return a.IP.Equal(b.IP) && bytes.Equal(a.Mask, b.Mask)
+}
+
+/******************************
+ * Well-known Error Interfaces
+ ******************************/
+
+// MaskableError is an interface for errors which can be ignored by caller
+type MaskableError interface {
+ // Maskable makes implementer into MaskableError type
+ Maskable()
+}
+
+// BadRequestError is an interface for errors originated by a bad request
+type BadRequestError interface {
+ // BadRequest makes implementer into BadRequestError type
+ BadRequest()
+}
+
+// NotFoundError is an interface for errors raised because a needed resource is not available
+type NotFoundError interface {
+ // NotFound makes implementer into NotFoundError type
+ NotFound()
+}
+
+// ForbiddenError is an interface for errors which denote an valid request that cannot be honored
+type ForbiddenError interface {
+ // Forbidden makes implementer into ForbiddenError type
+ Forbidden()
+}
+
+// NoServiceError is an interface for errors returned when the required service is not available
+type NoServiceError interface {
+ // NoService makes implementer into NoServiceError type
+ NoService()
+}
+
+// TimeoutError is an interface for errors raised because of timeout
+type TimeoutError interface {
+ // Timeout makes implementer into TimeoutError type
+ Timeout()
+}
+
+// NotImplementedError is an interface for errors raised because of requested functionality is not yet implemented
+type NotImplementedError interface {
+ // NotImplemented makes implementer into NotImplementedError type
+ NotImplemented()
+}
+
+// InternalError is an interface for errors raised because of an internal error
+type InternalError interface {
+ // Internal makes implementer into InternalError type
+ Internal()
+}
+
+/******************************
+ * Well-known Error Formatters
+ ******************************/
+
+// BadRequestErrorf creates an instance of BadRequestError
+func BadRequestErrorf(format string, params ...interface{}) error {
+ return badRequest(fmt.Sprintf(format, params...))
+}
+
+// NotFoundErrorf creates an instance of NotFoundError
+func NotFoundErrorf(format string, params ...interface{}) error {
+ return notFound(fmt.Sprintf(format, params...))
+}
+
+// ForbiddenErrorf creates an instance of ForbiddenError
+func ForbiddenErrorf(format string, params ...interface{}) error {
+ return forbidden(fmt.Sprintf(format, params...))
+}
+
+// NoServiceErrorf creates an instance of NoServiceError
+func NoServiceErrorf(format string, params ...interface{}) error {
+ return noService(fmt.Sprintf(format, params...))
+}
+
+// NotImplementedErrorf creates an instance of NotImplementedError
+func NotImplementedErrorf(format string, params ...interface{}) error {
+ return notImpl(fmt.Sprintf(format, params...))
+}
+
+// TimeoutErrorf creates an instance of TimeoutError
+func TimeoutErrorf(format string, params ...interface{}) error {
+ return timeout(fmt.Sprintf(format, params...))
+}
+
+// InternalErrorf creates an instance of InternalError
+func InternalErrorf(format string, params ...interface{}) error {
+ return internal(fmt.Sprintf(format, params...))
+}
+
+// InternalMaskableErrorf creates an instance of InternalError and MaskableError
+func InternalMaskableErrorf(format string, params ...interface{}) error {
+ return maskInternal(fmt.Sprintf(format, params...))
+}
+
+/***********************
+ * Internal Error Types
+ ***********************/
+type badRequest string
+
+func (br badRequest) Error() string {
+ return string(br)
+}
+func (br badRequest) BadRequest() {}
+
+type maskBadRequest string
+
+type notFound string
+
+func (nf notFound) Error() string {
+ return string(nf)
+}
+func (nf notFound) NotFound() {}
+
+type forbidden string
+
+func (frb forbidden) Error() string {
+ return string(frb)
+}
+func (frb forbidden) Forbidden() {}
+
+type noService string
+
+func (ns noService) Error() string {
+ return string(ns)
+}
+func (ns noService) NoService() {}
+
+type maskNoService string
+
+type timeout string
+
+func (to timeout) Error() string {
+ return string(to)
+}
+func (to timeout) Timeout() {}
+
+type notImpl string
+
+func (ni notImpl) Error() string {
+ return string(ni)
+}
+func (ni notImpl) NotImplemented() {}
+
+type internal string
+
+func (nt internal) Error() string {
+ return string(nt)
+}
+func (nt internal) Internal() {}
+
+type maskInternal string
+
+func (mnt maskInternal) Error() string {
+ return string(mnt)
+}
+func (mnt maskInternal) Internal() {}
+func (mnt maskInternal) Maskable() {}
diff --git a/vendor/src/github.com/docker/libnetwork/types/types_test.go b/vendor/src/github.com/docker/libnetwork/types/types_test.go
new file mode 100644
index 0000000000..9e96ea858d
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/types/types_test.go
@@ -0,0 +1,99 @@
+package types
+
+import (
+ "testing"
+
+ _ "github.com/docker/libnetwork/netutils"
+)
+
+func TestErrorConstructors(t *testing.T) {
+ var err error
+
+ err = BadRequestErrorf("Io ho %d uccello", 1)
+ if err.Error() != "Io ho 1 uccello" {
+ t.Fatal(err)
+ }
+ if _, ok := err.(BadRequestError); !ok {
+ t.Fatal(err)
+ }
+ if _, ok := err.(MaskableError); ok {
+ t.Fatal(err)
+ }
+
+ err = NotFoundErrorf("Can't find the %s", "keys")
+ if err.Error() != "Can't find the keys" {
+ t.Fatal(err)
+ }
+ if _, ok := err.(NotFoundError); !ok {
+ t.Fatal(err)
+ }
+ if _, ok := err.(MaskableError); ok {
+ t.Fatal(err)
+ }
+
+ err = ForbiddenErrorf("Can't open door %d", 2)
+ if err.Error() != "Can't open door 2" {
+ t.Fatal(err)
+ }
+ if _, ok := err.(ForbiddenError); !ok {
+ t.Fatal(err)
+ }
+ if _, ok := err.(MaskableError); ok {
+ t.Fatal(err)
+ }
+
+ err = NotImplementedErrorf("Functionality %s is not implemented", "x")
+ if err.Error() != "Functionality x is not implemented" {
+ t.Fatal(err)
+ }
+ if _, ok := err.(NotImplementedError); !ok {
+ t.Fatal(err)
+ }
+ if _, ok := err.(MaskableError); ok {
+ t.Fatal(err)
+ }
+
+ err = TimeoutErrorf("Process %s timed out", "abc")
+ if err.Error() != "Process abc timed out" {
+ t.Fatal(err)
+ }
+ if _, ok := err.(TimeoutError); !ok {
+ t.Fatal(err)
+ }
+ if _, ok := err.(MaskableError); ok {
+ t.Fatal(err)
+ }
+
+ err = NoServiceErrorf("Driver %s is not available", "mh")
+ if err.Error() != "Driver mh is not available" {
+ t.Fatal(err)
+ }
+ if _, ok := err.(NoServiceError); !ok {
+ t.Fatal(err)
+ }
+ if _, ok := err.(MaskableError); ok {
+ t.Fatal(err)
+ }
+
+ err = InternalErrorf("Not sure what happened")
+ if err.Error() != "Not sure what happened" {
+ t.Fatal(err)
+ }
+ if _, ok := err.(InternalError); !ok {
+ t.Fatal(err)
+ }
+ if _, ok := err.(MaskableError); ok {
+ t.Fatal(err)
+ }
+
+ err = InternalMaskableErrorf("Minor issue, it can be ignored")
+ if err.Error() != "Minor issue, it can be ignored" {
+ t.Fatal(err)
+ }
+ if _, ok := err.(InternalError); !ok {
+ t.Fatal(err)
+ }
+ if _, ok := err.(MaskableError); !ok {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/.gitignore b/vendor/src/github.com/go-fsnotify/fsnotify/.gitignore
deleted file mode 100644
index 4cd0cbaf43..0000000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-# Setup a Global .gitignore for OS and editor generated files:
-# https://help.github.com/articles/ignoring-files
-# git config --global core.excludesfile ~/.gitignore_global
-
-.vagrant
-*.sublime-project
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/.travis.yml b/vendor/src/github.com/go-fsnotify/fsnotify/.travis.yml
deleted file mode 100644
index f8e76fc660..0000000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-
-go:
- - 1.2
- - tip
-
-# not yet https://github.com/travis-ci/travis-ci/issues/2318
-os:
- - linux
- - osx
-
-notifications:
- email: false
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/AUTHORS b/vendor/src/github.com/go-fsnotify/fsnotify/AUTHORS
deleted file mode 100644
index 306091eda6..0000000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/AUTHORS
+++ /dev/null
@@ -1,32 +0,0 @@
-# Names should be added to this file as
-# Name or Organization
-# The email address is not required for organizations.
-
-# You can update this list using the following command:
-#
-# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
-
-# Please keep the list sorted.
-
-Adrien Bustany
-Caleb Spare
-Case Nelson
-Chris Howey
-Christoffer Buchholz
-Dave Cheney
-Francisco Souza
-Hari haran
-John C Barstow
-Kelvin Fo
-Nathan Youngman
-Paul Hammond
-Pursuit92
-Rob Figueiredo
-Soge Zhang
-Tilak Sharma
-Travis Cline
-Tudor Golubenco
-Yukang
-bronze1man
-debrando
-henrikedwards
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/CHANGELOG.md b/vendor/src/github.com/go-fsnotify/fsnotify/CHANGELOG.md
deleted file mode 100644
index 79f4ddbaa1..0000000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/CHANGELOG.md
+++ /dev/null
@@ -1,237 +0,0 @@
-# Changelog
-
-## v1.0.4 / 2014-09-07
-
-* kqueue: add dragonfly to the build tags.
-* Rename source code files, rearrange code so exported APIs are at the top.
-* Add done channel to example code. [#37](https://github.com/go-fsnotify/fsnotify/pull/37) (thanks @chenyukang)
-
-## v1.0.3 / 2014-08-19
-
-* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/go-fsnotify/fsnotify/issues/36)
-
-## v1.0.2 / 2014-08-17
-
-* [Fix] Missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso)
-* [Fix] Make ./path and path equivalent. (thanks @zhsso)
-
-## v1.0.0 / 2014-08-15
-
-* [API] Remove AddWatch on Windows, use Add.
-* Improve documentation for exported identifiers. [#30](https://github.com/go-fsnotify/fsnotify/issues/30)
-* Minor updates based on feedback from golint.
-
-## dev / 2014-07-09
-
-* Moved to [github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify).
-* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
-
-## dev / 2014-07-04
-
-* kqueue: fix incorrect mutex used in Close()
-* Update example to demonstrate usage of Op.
-
-## dev / 2014-06-28
-
-* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/go-fsnotify/fsnotify/issues/4)
-* Fix for String() method on Event (thanks Alex Brainman)
-* Don't build on Plan 9 or Solaris (thanks @4ad)
-
-## dev / 2014-06-21
-
-* Events channel of type Event rather than *Event.
-* [internal] use syscall constants directly for inotify and kqueue.
-* [internal] kqueue: rename events to kevents and fileEvent to event.
-
-## dev / 2014-06-19
-
-* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
-* [internal] remove cookie from Event struct (unused).
-* [internal] Event struct has the same definition across every OS.
-* [internal] remove internal watch and removeWatch methods.
-
-## dev / 2014-06-12
-
-* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
-* [API] Pluralized channel names: Events and Errors.
-* [API] Renamed FileEvent struct to Event.
-* [API] Op constants replace methods like IsCreate().
-
-## dev / 2014-06-12
-
-* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
-
-## dev / 2014-05-23
-
-* [API] Remove current implementation of WatchFlags.
- * current implementation doesn't take advantage of OS for efficiency
- * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
- * no tests for the current implementation
- * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
-
-## v0.9.2 / 2014-08-17
-
-* [Backport] Fix missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso)
-
-## v0.9.1 / 2014-06-12
-
-* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
-
-## v0.9.0 / 2014-01-17
-
-* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
-* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
-* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
-
-## v0.8.12 / 2013-11-13
-
-* [API] Remove FD_SET and friends from Linux adapter
-
-## v0.8.11 / 2013-11-02
-
-* [Doc] Add Changelog [#72][] (thanks @nathany)
-* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond)
-
-## v0.8.10 / 2013-10-19
-
-* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
-* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
-* [Doc] specify OS-specific limits in README (thanks @debrando)
-
-## v0.8.9 / 2013-09-08
-
-* [Doc] Contributing (thanks @nathany)
-* [Doc] update package path in example code [#63][] (thanks @paulhammond)
-* [Doc] GoCI badge in README (Linux only) [#60][]
-* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
-
-## v0.8.8 / 2013-06-17
-
-* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
-
-## v0.8.7 / 2013-06-03
-
-* [API] Make syscall flags internal
-* [Fix] inotify: ignore event changes
-* [Fix] race in symlink test [#45][] (reported by @srid)
-* [Fix] tests on Windows
-* lower case error messages
-
-## v0.8.6 / 2013-05-23
-
-* kqueue: Use EVT_ONLY flag on Darwin
-* [Doc] Update README with full example
-
-## v0.8.5 / 2013-05-09
-
-* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
-
-## v0.8.4 / 2013-04-07
-
-* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
-
-## v0.8.3 / 2013-03-13
-
-* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
-* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
-
-## v0.8.2 / 2013-02-07
-
-* [Doc] add Authors
-* [Fix] fix data races for map access [#29][] (thanks @fsouza)
-
-## v0.8.1 / 2013-01-09
-
-* [Fix] Windows path separators
-* [Doc] BSD License
-
-## v0.8.0 / 2012-11-09
-
-* kqueue: directory watching improvements (thanks @vmirage)
-* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
-* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
-
-## v0.7.4 / 2012-10-09
-
-* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
-* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
-* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
-* [Fix] kqueue: modify after recreation of file
-
-## v0.7.3 / 2012-09-27
-
-* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
-* [Fix] kqueue: no longer get duplicate CREATE events
-
-## v0.7.2 / 2012-09-01
-
-* kqueue: events for created directories
-
-## v0.7.1 / 2012-07-14
-
-* [Fix] for renaming files
-
-## v0.7.0 / 2012-07-02
-
-* [Feature] FSNotify flags
-* [Fix] inotify: Added file name back to event path
-
-## v0.6.0 / 2012-06-06
-
-* kqueue: watch files after directory created (thanks @tmc)
-
-## v0.5.1 / 2012-05-22
-
-* [Fix] inotify: remove all watches before Close()
-
-## v0.5.0 / 2012-05-03
-
-* [API] kqueue: return errors during watch instead of sending over channel
-* kqueue: match symlink behavior on Linux
-* inotify: add `DELETE_SELF` (requested by @taralx)
-* [Fix] kqueue: handle EINTR (reported by @robfig)
-* [Doc] Godoc example [#1][] (thanks @davecheney)
-
-## v0.4.0 / 2012-03-30
-
-* Go 1 released: build with go tool
-* [Feature] Windows support using winfsnotify
-* Windows does not have attribute change notifications
-* Roll attribute notifications into IsModify
-
-## v0.3.0 / 2012-02-19
-
-* kqueue: add files when watch directory
-
-## v0.2.0 / 2011-12-30
-
-* update to latest Go weekly code
-
-## v0.1.0 / 2011-10-19
-
-* kqueue: add watch on file creation to match inotify
-* kqueue: create file event
-* inotify: ignore `IN_IGNORED` events
-* event String()
-* linux: common FileEvent functions
-* initial commit
-
-[#79]: https://github.com/howeyc/fsnotify/pull/79
-[#77]: https://github.com/howeyc/fsnotify/pull/77
-[#72]: https://github.com/howeyc/fsnotify/issues/72
-[#71]: https://github.com/howeyc/fsnotify/issues/71
-[#70]: https://github.com/howeyc/fsnotify/issues/70
-[#63]: https://github.com/howeyc/fsnotify/issues/63
-[#62]: https://github.com/howeyc/fsnotify/issues/62
-[#60]: https://github.com/howeyc/fsnotify/issues/60
-[#59]: https://github.com/howeyc/fsnotify/issues/59
-[#49]: https://github.com/howeyc/fsnotify/issues/49
-[#45]: https://github.com/howeyc/fsnotify/issues/45
-[#40]: https://github.com/howeyc/fsnotify/issues/40
-[#36]: https://github.com/howeyc/fsnotify/issues/36
-[#33]: https://github.com/howeyc/fsnotify/issues/33
-[#29]: https://github.com/howeyc/fsnotify/issues/29
-[#25]: https://github.com/howeyc/fsnotify/issues/25
-[#24]: https://github.com/howeyc/fsnotify/issues/24
-[#21]: https://github.com/howeyc/fsnotify/issues/21
-
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/CONTRIBUTING.md b/vendor/src/github.com/go-fsnotify/fsnotify/CONTRIBUTING.md
deleted file mode 100644
index 2fd0423cca..0000000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/CONTRIBUTING.md
+++ /dev/null
@@ -1,56 +0,0 @@
-# Contributing
-
-* Send questions to [golang-dev@googlegroups.com](mailto:golang-dev@googlegroups.com).
-
-### Issues
-
-* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/go-fsnotify/fsnotify/issues).
-* Please indicate the platform you are running on.
-
-### Pull Requests
-
-A future version of Go will have [fsnotify in the standard library](https://code.google.com/p/go/issues/detail?id=4068), therefore fsnotify carries the same [LICENSE](https://github.com/go-fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so we need you to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
-
-Please indicate that you have signed the CLA in your pull request.
-
-To hack on fsnotify:
-
-1. Install as usual (`go get -u github.com/go-fsnotify/fsnotify`)
-2. Create your feature branch (`git checkout -b my-new-feature`)
-3. Ensure everything works and the tests pass (see below)
-4. Commit your changes (`git commit -am 'Add some feature'`)
-
-Contribute upstream:
-
-1. Fork fsnotify on GitHub
-2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
-3. Push to the branch (`git push fork my-new-feature`)
-4. Create a new Pull Request on GitHub
-
-If other team members need your patch before I merge it:
-
-1. Install as usual (`go get -u github.com/go-fsnotify/fsnotify`)
-2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
-3. Pull your revisions (`git fetch fork; git checkout -b my-new-feature fork/my-new-feature`)
-
-Notice: For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
-
-Note: The maintainers will update the CHANGELOG on your behalf. Please don't modify it in your pull request.
-
-### Testing
-
-fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows.
-
-Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
-
-To make cross-platform testing easier, I've created a Vagrantfile for Linux and BSD.
-
-* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
-* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
-* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
-* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd go-fsnotify/fsnotify; go test'`.
-* When you're done, you will want to halt or destroy the Vagrant boxes.
-
-Notice: fsnotify file system events don't work on shared folders. The tests get around this limitation by using a tmp directory, but it is something to be aware of.
-
-Right now I don't have an equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/README.md b/vendor/src/github.com/go-fsnotify/fsnotify/README.md
deleted file mode 100644
index 0759284269..0000000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# File system notifications for Go
-
-[](http://gocover.io/github.com/go-fsnotify/fsnotify) [](https://godoc.org/gopkg.in/fsnotify.v1)
-
-Cross platform: Windows, Linux, BSD and OS X.
-
-|Adapter |OS |Status |
-|----------|----------|----------|
-|inotify |Linux, Android\*|Supported|
-|kqueue |BSD, OS X, iOS\*|Supported|
-|ReadDirectoryChangesW|Windows|Supported|
-|FSEvents |OS X |[Planned](https://github.com/go-fsnotify/fsnotify/issues/11)|
-|FEN |Solaris 11 |[Planned](https://github.com/go-fsnotify/fsnotify/issues/12)|
-|fanotify |Linux 2.6.37+ | |
-|Polling |*All* |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/9)|
-| |Plan 9 | |
-
-\* Android and iOS are untested.
-
-Please see [the documentation](https://godoc.org/gopkg.in/fsnotify.v1) for usage. Consult the [Wiki](https://github.com/go-fsnotify/fsnotify/wiki) for the FAQ and further information.
-
-## API stability
-
-Two major versions of fsnotify exist.
-
-**[fsnotify.v1](https://gopkg.in/fsnotify.v1)** provides [a new API](https://godoc.org/gopkg.in/fsnotify.v1) based on [this design document](http://goo.gl/MrYxyA). You can import v1 with:
-
-```go
-import "gopkg.in/fsnotify.v1"
-```
-
-\* Refer to the package as fsnotify (without the .v1 suffix).
-
-**[fsnotify.v0](https://gopkg.in/fsnotify.v0)** is API-compatible with [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify). Bugfixes *may* be backported, but I recommend upgrading to v1.
-
-```go
-import "gopkg.in/fsnotify.v0"
-```
-
-Further API changes are [planned](https://github.com/go-fsnotify/fsnotify/milestones), but a new major revision will be tagged, so you can depend on the v1 API.
-
-## Contributing
-
-* Send questions to [golang-dev@googlegroups.com](mailto:golang-dev@googlegroups.com).
-* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/go-fsnotify/fsnotify/issues).
-
-A future version of Go will have [fsnotify in the standard library](https://code.google.com/p/go/issues/detail?id=4068), therefore fsnotify carries the same [LICENSE](https://github.com/go-fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so we need you to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
-
-Please read [CONTRIBUTING](https://github.com/go-fsnotify/fsnotify/blob/master/CONTRIBUTING.md) before opening a pull request.
-
-## Example
-
-See [example_test.go](https://github.com/go-fsnotify/fsnotify/blob/master/example_test.go).
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/example_test.go b/vendor/src/github.com/go-fsnotify/fsnotify/example_test.go
deleted file mode 100644
index 9f2c63f475..0000000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/example_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9,!solaris
-
-package fsnotify_test
-
-import (
- "log"
-
- "gopkg.in/fsnotify.v1"
-)
-
-func ExampleNewWatcher() {
- watcher, err := fsnotify.NewWatcher()
- if err != nil {
- log.Fatal(err)
- }
- defer watcher.Close()
-
- done := make(chan bool)
- go func() {
- for {
- select {
- case event := <-watcher.Events:
- log.Println("event:", event)
- if event.Op&fsnotify.Write == fsnotify.Write {
- log.Println("modified file:", event.Name)
- }
- case err := <-watcher.Errors:
- log.Println("error:", err)
- }
- }
- }()
-
- err = watcher.Add("/tmp/foo")
- if err != nil {
- log.Fatal(err)
- }
- <-done
-}
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/fsnotify.go b/vendor/src/github.com/go-fsnotify/fsnotify/fsnotify.go
deleted file mode 100644
index 7b5233f4bb..0000000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/fsnotify.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9,!solaris
-
-// Package fsnotify provides a platform-independent interface for file system notifications.
-package fsnotify
-
-import "fmt"
-
-// Event represents a single file system notification.
-type Event struct {
- Name string // Relative path to the file or directory.
- Op Op // File operation that triggered the event.
-}
-
-// Op describes a set of file operations.
-type Op uint32
-
-// These are the generalized file operations that can trigger a notification.
-const (
- Create Op = 1 << iota
- Write
- Remove
- Rename
- Chmod
-)
-
-// String returns a string representation of the event in the form
-// "file: REMOVE|WRITE|..."
-func (e Event) String() string {
- events := ""
-
- if e.Op&Create == Create {
- events += "|CREATE"
- }
- if e.Op&Remove == Remove {
- events += "|REMOVE"
- }
- if e.Op&Write == Write {
- events += "|WRITE"
- }
- if e.Op&Rename == Rename {
- events += "|RENAME"
- }
- if e.Op&Chmod == Chmod {
- events += "|CHMOD"
- }
-
- if len(events) > 0 {
- events = events[1:]
- }
-
- return fmt.Sprintf("%q: %s", e.Name, events)
-}
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/inotify.go b/vendor/src/github.com/go-fsnotify/fsnotify/inotify.go
deleted file mode 100644
index f5c0aaef04..0000000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/inotify.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-package fsnotify
-
-import (
- "errors"
- "fmt"
- "os"
- "path/filepath"
- "strings"
- "sync"
- "syscall"
- "unsafe"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
- Events chan Event
- Errors chan error
- mu sync.Mutex // Map access
- fd int // File descriptor (as returned by the inotify_init() syscall)
- watches map[string]*watch // Map of inotify watches (key: path)
- paths map[int]string // Map of watched paths (key: watch descriptor)
- done chan bool // Channel for sending a "quit message" to the reader goroutine
- isClosed bool // Set to true when Close() is first called
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) {
- fd, errno := syscall.InotifyInit()
- if fd == -1 {
- return nil, os.NewSyscallError("inotify_init", errno)
- }
- w := &Watcher{
- fd: fd,
- watches: make(map[string]*watch),
- paths: make(map[int]string),
- Events: make(chan Event),
- Errors: make(chan error),
- done: make(chan bool, 1),
- }
-
- go w.readEvents()
- return w, nil
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
- if w.isClosed {
- return nil
- }
- w.isClosed = true
-
- // Remove all watches
- for name := range w.watches {
- w.Remove(name)
- }
-
- // Send "quit" message to the reader goroutine
- w.done <- true
-
- return nil
-}
-
-// Add starts watching the named file or directory (non-recursively).
-func (w *Watcher) Add(name string) error {
- name = filepath.Clean(name)
- if w.isClosed {
- return errors.New("inotify instance already closed")
- }
-
- const agnosticEvents = syscall.IN_MOVED_TO | syscall.IN_MOVED_FROM |
- syscall.IN_CREATE | syscall.IN_ATTRIB | syscall.IN_MODIFY |
- syscall.IN_MOVE_SELF | syscall.IN_DELETE | syscall.IN_DELETE_SELF
-
- var flags uint32 = agnosticEvents
-
- w.mu.Lock()
- watchEntry, found := w.watches[name]
- w.mu.Unlock()
- if found {
- watchEntry.flags |= flags
- flags |= syscall.IN_MASK_ADD
- }
- wd, errno := syscall.InotifyAddWatch(w.fd, name, flags)
- if wd == -1 {
- return os.NewSyscallError("inotify_add_watch", errno)
- }
-
- w.mu.Lock()
- w.watches[name] = &watch{wd: uint32(wd), flags: flags}
- w.paths[wd] = name
- w.mu.Unlock()
-
- return nil
-}
-
-// Remove stops watching the the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
- name = filepath.Clean(name)
- w.mu.Lock()
- defer w.mu.Unlock()
- watch, ok := w.watches[name]
- if !ok {
- return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
- }
- success, errno := syscall.InotifyRmWatch(w.fd, watch.wd)
- if success == -1 {
- return os.NewSyscallError("inotify_rm_watch", errno)
- }
- delete(w.watches, name)
- return nil
-}
-
-type watch struct {
- wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
- flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
-}
-
-// readEvents reads from the inotify file descriptor, converts the
-// received events into Event objects and sends them via the Events channel
-func (w *Watcher) readEvents() {
- var (
- buf [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
- n int // Number of bytes read with read()
- errno error // Syscall errno
- )
-
- for {
- // See if there is a message on the "done" channel
- select {
- case <-w.done:
- syscall.Close(w.fd)
- close(w.Events)
- close(w.Errors)
- return
- default:
- }
-
- n, errno = syscall.Read(w.fd, buf[:])
-
- // If EOF is received
- if n == 0 {
- syscall.Close(w.fd)
- close(w.Events)
- close(w.Errors)
- return
- }
-
- if n < 0 {
- w.Errors <- os.NewSyscallError("read", errno)
- continue
- }
- if n < syscall.SizeofInotifyEvent {
- w.Errors <- errors.New("inotify: short read in readEvents()")
- continue
- }
-
- var offset uint32
- // We don't know how many events we just read into the buffer
- // While the offset points to at least one whole event...
- for offset <= uint32(n-syscall.SizeofInotifyEvent) {
- // Point "raw" to the event in the buffer
- raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
-
- mask := uint32(raw.Mask)
- nameLen := uint32(raw.Len)
- // If the event happened to the watched directory or the watched file, the kernel
- // doesn't append the filename to the event, but we would like to always fill the
- // the "Name" field with a valid filename. We retrieve the path of the watch from
- // the "paths" map.
- w.mu.Lock()
- name := w.paths[int(raw.Wd)]
- w.mu.Unlock()
- if nameLen > 0 {
- // Point "bytes" at the first byte of the filename
- bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
- // The filename is padded with NULL bytes. TrimRight() gets rid of those.
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
- }
-
- event := newEvent(name, mask)
-
- // Send the events that are not ignored on the events channel
- if !event.ignoreLinux(mask) {
- w.Events <- event
- }
-
- // Move to the next event in the buffer
- offset += syscall.SizeofInotifyEvent + nameLen
- }
- }
-}
-
-// Certain types of events can be "ignored" and not sent over the Events
-// channel. Such as events marked ignore by the kernel, or MODIFY events
-// against files that do not exist.
-func (e *Event) ignoreLinux(mask uint32) bool {
- // Ignore anything the inotify API says to ignore
- if mask&syscall.IN_IGNORED == syscall.IN_IGNORED {
- return true
- }
-
- // If the event is not a DELETE or RENAME, the file must exist.
- // Otherwise the event is ignored.
- // *Note*: this was put in place because it was seen that a MODIFY
- // event was sent after the DELETE. This ignores that MODIFY and
- // assumes a DELETE will come or has come if the file doesn't exist.
- if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
- _, statErr := os.Lstat(e.Name)
- return os.IsNotExist(statErr)
- }
- return false
-}
-
-// newEvent returns an platform-independent Event based on an inotify mask.
-func newEvent(name string, mask uint32) Event {
- e := Event{Name: name}
- if mask&syscall.IN_CREATE == syscall.IN_CREATE || mask&syscall.IN_MOVED_TO == syscall.IN_MOVED_TO {
- e.Op |= Create
- }
- if mask&syscall.IN_DELETE_SELF == syscall.IN_DELETE_SELF || mask&syscall.IN_DELETE == syscall.IN_DELETE {
- e.Op |= Remove
- }
- if mask&syscall.IN_MODIFY == syscall.IN_MODIFY {
- e.Op |= Write
- }
- if mask&syscall.IN_MOVE_SELF == syscall.IN_MOVE_SELF || mask&syscall.IN_MOVED_FROM == syscall.IN_MOVED_FROM {
- e.Op |= Rename
- }
- if mask&syscall.IN_ATTRIB == syscall.IN_ATTRIB {
- e.Op |= Chmod
- }
- return e
-}
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/integration_test.go b/vendor/src/github.com/go-fsnotify/fsnotify/integration_test.go
deleted file mode 100644
index ad51ab60b2..0000000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/integration_test.go
+++ /dev/null
@@ -1,1120 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9,!solaris
-
-package fsnotify
-
-import (
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "runtime"
- "sync/atomic"
- "testing"
- "time"
-)
-
-// An atomic counter
-type counter struct {
- val int32
-}
-
-func (c *counter) increment() {
- atomic.AddInt32(&c.val, 1)
-}
-
-func (c *counter) value() int32 {
- return atomic.LoadInt32(&c.val)
-}
-
-func (c *counter) reset() {
- atomic.StoreInt32(&c.val, 0)
-}
-
-// tempMkdir makes a temporary directory
-func tempMkdir(t *testing.T) string {
- dir, err := ioutil.TempDir("", "fsnotify")
- if err != nil {
- t.Fatalf("failed to create test directory: %s", err)
- }
- return dir
-}
-
-// newWatcher initializes an fsnotify Watcher instance.
-func newWatcher(t *testing.T) *Watcher {
- watcher, err := NewWatcher()
- if err != nil {
- t.Fatalf("NewWatcher() failed: %s", err)
- }
- return watcher
-}
-
-// addWatch adds a watch for a directory
-func addWatch(t *testing.T, watcher *Watcher, dir string) {
- if err := watcher.Add(dir); err != nil {
- t.Fatalf("watcher.Add(%q) failed: %s", dir, err)
- }
-}
-
-func TestFsnotifyMultipleOperations(t *testing.T) {
- watcher := newWatcher(t)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Create directory that's not watched
- testDirToMoveFiles := tempMkdir(t)
- defer os.RemoveAll(testDirToMoveFiles)
-
- testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
- testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile")
-
- addWatch(t, watcher, testDir)
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var createReceived, modifyReceived, deleteReceived, renameReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
- t.Logf("event received: %s", event)
- if event.Op&Remove == Remove {
- deleteReceived.increment()
- }
- if event.Op&Write == Write {
- modifyReceived.increment()
- }
- if event.Op&Create == Create {
- createReceived.increment()
- }
- if event.Op&Rename == Rename {
- renameReceived.increment()
- }
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- // Create a file
- // This should add at least one event to the fsnotify event queue
- var f *os.File
- f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
-
- time.Sleep(time.Millisecond)
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- if err := testRename(testFile, testFileRenamed); err != nil {
- t.Fatalf("rename failed: %s", err)
- }
-
- // Modify the file outside of the watched dir
- f, err = os.Open(testFileRenamed)
- if err != nil {
- t.Fatalf("open test renamed file failed: %s", err)
- }
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- // Recreate the file that was moved
- f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Close()
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- cReceived := createReceived.value()
- if cReceived != 2 {
- t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
- }
- mReceived := modifyReceived.value()
- if mReceived != 1 {
- t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
- }
- dReceived := deleteReceived.value()
- rReceived := renameReceived.value()
- if dReceived+rReceived != 1 {
- t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1)
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-}
-
-func TestFsnotifyMultipleCreates(t *testing.T) {
- watcher := newWatcher(t)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
-
- addWatch(t, watcher, testDir)
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var createReceived, modifyReceived, deleteReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
- t.Logf("event received: %s", event)
- if event.Op&Remove == Remove {
- deleteReceived.increment()
- }
- if event.Op&Create == Create {
- createReceived.increment()
- }
- if event.Op&Write == Write {
- modifyReceived.increment()
- }
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- // Create a file
- // This should add at least one event to the fsnotify event queue
- var f *os.File
- f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
-
- time.Sleep(time.Millisecond)
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- os.Remove(testFile)
-
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- // Recreate the file
- f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Close()
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- // Modify
- f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
-
- time.Sleep(time.Millisecond)
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- // Modify
- f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
-
- time.Sleep(time.Millisecond)
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- cReceived := createReceived.value()
- if cReceived != 2 {
- t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
- }
- mReceived := modifyReceived.value()
- if mReceived < 3 {
- t.Fatalf("incorrect number of modify events received after 500 ms (%d vs atleast %d)", mReceived, 3)
- }
- dReceived := deleteReceived.value()
- if dReceived != 1 {
- t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1)
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-}
-
-func TestFsnotifyDirOnly(t *testing.T) {
- watcher := newWatcher(t)
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Create a file before watching directory
- // This should NOT add any events to the fsnotify event queue
- testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
- {
- var f *os.File
- f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
- f.Close()
- }
-
- addWatch(t, watcher, testDir)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile")
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var createReceived, modifyReceived, deleteReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) {
- t.Logf("event received: %s", event)
- if event.Op&Remove == Remove {
- deleteReceived.increment()
- }
- if event.Op&Write == Write {
- modifyReceived.increment()
- }
- if event.Op&Create == Create {
- createReceived.increment()
- }
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- // Create a file
- // This should add at least one event to the fsnotify event queue
- var f *os.File
- f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
-
- time.Sleep(time.Millisecond)
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
- os.Remove(testFile)
- os.Remove(testFileAlreadyExists)
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- cReceived := createReceived.value()
- if cReceived != 1 {
- t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1)
- }
- mReceived := modifyReceived.value()
- if mReceived != 1 {
- t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
- }
- dReceived := deleteReceived.value()
- if dReceived != 2 {
- t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-}
-
-func TestFsnotifyDeleteWatchedDir(t *testing.T) {
- watcher := newWatcher(t)
- defer watcher.Close()
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Create a file before watching directory
- testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
- {
- var f *os.File
- f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
- f.Close()
- }
-
- addWatch(t, watcher, testDir)
-
- // Add a watch for testFile
- addWatch(t, watcher, testFileAlreadyExists)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var deleteReceived counter
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) {
- t.Logf("event received: %s", event)
- if event.Op&Remove == Remove {
- deleteReceived.increment()
- }
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- }()
-
- os.RemoveAll(testDir)
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- dReceived := deleteReceived.value()
- if dReceived < 2 {
- t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived)
- }
-}
-
-func TestFsnotifySubDir(t *testing.T) {
- watcher := newWatcher(t)
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile")
- testSubDir := filepath.Join(testDir, "sub")
- testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile")
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var createReceived, deleteReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) {
- t.Logf("event received: %s", event)
- if event.Op&Create == Create {
- createReceived.increment()
- }
- if event.Op&Remove == Remove {
- deleteReceived.increment()
- }
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- addWatch(t, watcher, testDir)
-
- // Create sub-directory
- if err := os.Mkdir(testSubDir, 0777); err != nil {
- t.Fatalf("failed to create test sub-directory: %s", err)
- }
-
- // Create a file
- var f *os.File
- f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
- f.Close()
-
- // Create a file (Should not see this! we are not watching subdir)
- var fs *os.File
- fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- fs.Sync()
- fs.Close()
-
- time.Sleep(200 * time.Millisecond)
-
- // Make sure receive deletes for both file and sub-directory
- os.RemoveAll(testSubDir)
- os.Remove(testFile1)
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- cReceived := createReceived.value()
- if cReceived != 2 {
- t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
- }
- dReceived := deleteReceived.value()
- if dReceived != 2 {
- t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-}
-
-func TestFsnotifyRename(t *testing.T) {
- watcher := newWatcher(t)
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- addWatch(t, watcher, testDir)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile")
- testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var renameReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
- if event.Op&Rename == Rename {
- renameReceived.increment()
- }
- t.Logf("event received: %s", event)
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- // Create a file
- // This should add at least one event to the fsnotify event queue
- var f *os.File
- f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
-
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- // Add a watch for testFile
- addWatch(t, watcher, testFile)
-
- if err := testRename(testFile, testFileRenamed); err != nil {
- t.Fatalf("rename failed: %s", err)
- }
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- if renameReceived.value() == 0 {
- t.Fatal("fsnotify rename events have not been received after 500 ms")
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-
- os.Remove(testFileRenamed)
-}
-
-func TestFsnotifyRenameToCreate(t *testing.T) {
- watcher := newWatcher(t)
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Create directory to get file
- testDirFrom := tempMkdir(t)
- defer os.RemoveAll(testDirFrom)
-
- addWatch(t, watcher, testDir)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
- testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var createReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
- if event.Op&Create == Create {
- createReceived.increment()
- }
- t.Logf("event received: %s", event)
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- // Create a file
- // This should add at least one event to the fsnotify event queue
- var f *os.File
- f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
- f.Close()
-
- if err := testRename(testFile, testFileRenamed); err != nil {
- t.Fatalf("rename failed: %s", err)
- }
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- if createReceived.value() == 0 {
- t.Fatal("fsnotify create events have not been received after 500 ms")
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-
- os.Remove(testFileRenamed)
-}
-
-func TestFsnotifyRenameToOverwrite(t *testing.T) {
- switch runtime.GOOS {
- case "plan9", "windows":
- t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS)
- }
-
- watcher := newWatcher(t)
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Create directory to get file
- testDirFrom := tempMkdir(t)
- defer os.RemoveAll(testDirFrom)
-
- testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
- testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
-
- // Create a file
- var fr *os.File
- fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- fr.Sync()
- fr.Close()
-
- addWatch(t, watcher, testDir)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- var eventReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testFileRenamed) {
- eventReceived.increment()
- t.Logf("event received: %s", event)
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- // Create a file
- // This should add at least one event to the fsnotify event queue
- var f *os.File
- f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
- f.Close()
-
- if err := testRename(testFile, testFileRenamed); err != nil {
- t.Fatalf("rename failed: %s", err)
- }
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
- if eventReceived.value() == 0 {
- t.Fatal("fsnotify events have not been received after 500 ms")
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(2 * time.Second):
- t.Fatal("event stream was not closed after 2 seconds")
- }
-
- os.Remove(testFileRenamed)
-}
-
-func TestRemovalOfWatch(t *testing.T) {
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Create a file before watching directory
- testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
- {
- var f *os.File
- f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
- f.Close()
- }
-
- watcher := newWatcher(t)
- defer watcher.Close()
-
- addWatch(t, watcher, testDir)
- if err := watcher.Remove(testDir); err != nil {
- t.Fatalf("Could not remove the watch: %v\n", err)
- }
-
- go func() {
- select {
- case ev := <-watcher.Events:
- t.Fatalf("We received event: %v\n", ev)
- case <-time.After(500 * time.Millisecond):
- t.Log("No event received, as expected.")
- }
- }()
-
- time.Sleep(200 * time.Millisecond)
- // Modify the file outside of the watched dir
- f, err := os.Open(testFileAlreadyExists)
- if err != nil {
- t.Fatalf("Open test file failed: %s", err)
- }
- f.WriteString("data")
- f.Sync()
- f.Close()
- if err := os.Chmod(testFileAlreadyExists, 0700); err != nil {
- t.Fatalf("chmod failed: %s", err)
- }
- time.Sleep(400 * time.Millisecond)
-}
-
-func TestFsnotifyAttrib(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip("attributes don't work on Windows.")
- }
-
- watcher := newWatcher(t)
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for err := range watcher.Errors {
- t.Fatalf("error received: %s", err)
- }
- }()
-
- testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile")
-
- // Receive events on the event channel on a separate goroutine
- eventstream := watcher.Events
- // The modifyReceived counter counts IsModify events that are not IsAttrib,
- // and the attribReceived counts IsAttrib events (which are also IsModify as
- // a consequence).
- var modifyReceived counter
- var attribReceived counter
- done := make(chan bool)
- go func() {
- for event := range eventstream {
- // Only count relevant events
- if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
- if event.Op&Write == Write {
- modifyReceived.increment()
- }
- if event.Op&Chmod == Chmod {
- attribReceived.increment()
- }
- t.Logf("event received: %s", event)
- } else {
- t.Logf("unexpected event received: %s", event)
- }
- }
- done <- true
- }()
-
- // Create a file
- // This should add at least one event to the fsnotify event queue
- var f *os.File
- f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
-
- f.WriteString("data")
- f.Sync()
- f.Close()
-
- // Add a watch for testFile
- addWatch(t, watcher, testFile)
-
- if err := os.Chmod(testFile, 0700); err != nil {
- t.Fatalf("chmod failed: %s", err)
- }
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- // Creating/writing a file changes also the mtime, so IsAttrib should be set to true here
- time.Sleep(500 * time.Millisecond)
- if modifyReceived.value() != 0 {
- t.Fatal("received an unexpected modify event when creating a test file")
- }
- if attribReceived.value() == 0 {
- t.Fatal("fsnotify attribute events have not received after 500 ms")
- }
-
- // Modifying the contents of the file does not set the attrib flag (although eg. the mtime
- // might have been modified).
- modifyReceived.reset()
- attribReceived.reset()
-
- f, err = os.OpenFile(testFile, os.O_WRONLY, 0)
- if err != nil {
- t.Fatalf("reopening test file failed: %s", err)
- }
-
- f.WriteString("more data")
- f.Sync()
- f.Close()
-
- time.Sleep(500 * time.Millisecond)
-
- if modifyReceived.value() != 1 {
- t.Fatal("didn't receive a modify event after changing test file contents")
- }
-
- if attribReceived.value() != 0 {
- t.Fatal("did receive an unexpected attrib event after changing test file contents")
- }
-
- modifyReceived.reset()
- attribReceived.reset()
-
- // Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents
- // of the file are not changed though)
- if err := os.Chmod(testFile, 0600); err != nil {
- t.Fatalf("chmod failed: %s", err)
- }
-
- time.Sleep(500 * time.Millisecond)
-
- if attribReceived.value() != 1 {
- t.Fatal("didn't receive an attribute change after 500ms")
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
- t.Log("waiting for the event channel to become closed...")
- select {
- case <-done:
- t.Log("event channel closed")
- case <-time.After(1e9):
- t.Fatal("event stream was not closed after 1 second")
- }
-
- os.Remove(testFile)
-}
-
-func TestFsnotifyClose(t *testing.T) {
- watcher := newWatcher(t)
- watcher.Close()
-
- var done int32
- go func() {
- watcher.Close()
- atomic.StoreInt32(&done, 1)
- }()
-
- time.Sleep(50e6) // 50 ms
- if atomic.LoadInt32(&done) == 0 {
- t.Fatal("double Close() test failed: second Close() call didn't return")
- }
-
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- if err := watcher.Add(testDir); err == nil {
- t.Fatal("expected error on Watch() after Close(), got nil")
- }
-}
-
-func TestFsnotifyFakeSymlink(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip("symlinks don't work on Windows.")
- }
-
- watcher := newWatcher(t)
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- var errorsReceived counter
- // Receive errors on the error channel on a separate goroutine
- go func() {
- for errors := range watcher.Errors {
- t.Logf("Received error: %s", errors)
- errorsReceived.increment()
- }
- }()
-
- // Count the CREATE events received
- var createEventsReceived, otherEventsReceived counter
- go func() {
- for ev := range watcher.Events {
- t.Logf("event received: %s", ev)
- if ev.Op&Create == Create {
- createEventsReceived.increment()
- } else {
- otherEventsReceived.increment()
- }
- }
- }()
-
- addWatch(t, watcher, testDir)
-
- if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil {
- t.Fatalf("Failed to create bogus symlink: %s", err)
- }
- t.Logf("Created bogus symlink")
-
- // We expect this event to be received almost immediately, but let's wait 500 ms to be sure
- time.Sleep(500 * time.Millisecond)
-
- // Should not be error, just no events for broken links (watching nothing)
- if errorsReceived.value() > 0 {
- t.Fatal("fsnotify errors have been received.")
- }
- if otherEventsReceived.value() > 0 {
- t.Fatal("fsnotify other events received on the broken link")
- }
-
- // Except for 1 create event (for the link itself)
- if createEventsReceived.value() == 0 {
- t.Fatal("fsnotify create events were not received after 500 ms")
- }
- if createEventsReceived.value() > 1 {
- t.Fatal("fsnotify more create events received than expected")
- }
-
- // Try closing the fsnotify instance
- t.Log("calling Close()")
- watcher.Close()
-}
-
-// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race.
-// See https://codereview.appspot.com/103300045/
-// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race
-func TestConcurrentRemovalOfWatch(t *testing.T) {
- if runtime.GOOS != "darwin" {
- t.Skip("regression test for race only present on darwin")
- }
-
- // Create directory to watch
- testDir := tempMkdir(t)
- defer os.RemoveAll(testDir)
-
- // Create a file before watching directory
- testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
- {
- var f *os.File
- f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- t.Fatalf("creating test file failed: %s", err)
- }
- f.Sync()
- f.Close()
- }
-
- watcher := newWatcher(t)
- defer watcher.Close()
-
- addWatch(t, watcher, testDir)
-
- // Test that RemoveWatch can be invoked concurrently, with no data races.
- removed1 := make(chan struct{})
- go func() {
- defer close(removed1)
- watcher.Remove(testDir)
- }()
- removed2 := make(chan struct{})
- go func() {
- close(removed2)
- watcher.Remove(testDir)
- }()
- <-removed1
- <-removed2
-}
-
-func testRename(file1, file2 string) error {
- switch runtime.GOOS {
- case "windows", "plan9":
- return os.Rename(file1, file2)
- default:
- cmd := exec.Command("mv", file1, file2)
- return cmd.Run()
- }
-}
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/kqueue.go b/vendor/src/github.com/go-fsnotify/fsnotify/kqueue.go
deleted file mode 100644
index 5ef1346c0d..0000000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/kqueue.go
+++ /dev/null
@@ -1,479 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build freebsd openbsd netbsd dragonfly darwin
-
-package fsnotify
-
-import (
- "errors"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "sync"
- "syscall"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
- Events chan Event
- Errors chan error
- mu sync.Mutex // Mutex for the Watcher itself.
- kq int // File descriptor (as returned by the kqueue() syscall).
- watches map[string]int // Map of watched file descriptors (key: path).
- wmut sync.Mutex // Protects access to watches.
- enFlags map[string]uint32 // Map of watched files to evfilt note flags used in kqueue.
- enmut sync.Mutex // Protects access to enFlags.
- paths map[int]string // Map of watched paths (key: watch descriptor).
- finfo map[int]os.FileInfo // Map of file information (isDir, isReg; key: watch descriptor).
- pmut sync.Mutex // Protects access to paths and finfo.
- fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
- femut sync.Mutex // Protects access to fileExists.
- externalWatches map[string]bool // Map of watches added by user of the library.
- ewmut sync.Mutex // Protects access to externalWatches.
- done chan bool // Channel for sending a "quit message" to the reader goroutine
- isClosed bool // Set to true when Close() is first called
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) {
- fd, errno := syscall.Kqueue()
- if fd == -1 {
- return nil, os.NewSyscallError("kqueue", errno)
- }
- w := &Watcher{
- kq: fd,
- watches: make(map[string]int),
- enFlags: make(map[string]uint32),
- paths: make(map[int]string),
- finfo: make(map[int]os.FileInfo),
- fileExists: make(map[string]bool),
- externalWatches: make(map[string]bool),
- Events: make(chan Event),
- Errors: make(chan error),
- done: make(chan bool, 1),
- }
-
- go w.readEvents()
- return w, nil
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
- return nil
- }
- w.isClosed = true
- w.mu.Unlock()
-
- // Send "quit" message to the reader goroutine:
- w.done <- true
- w.wmut.Lock()
- ws := w.watches
- w.wmut.Unlock()
- for name := range ws {
- w.Remove(name)
- }
-
- return nil
-}
-
-// Add starts watching the named file or directory (non-recursively).
-func (w *Watcher) Add(name string) error {
- w.ewmut.Lock()
- w.externalWatches[name] = true
- w.ewmut.Unlock()
- return w.addWatch(name, noteAllEvents)
-}
-
-// Remove stops watching the the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
- name = filepath.Clean(name)
- w.wmut.Lock()
- watchfd, ok := w.watches[name]
- w.wmut.Unlock()
- if !ok {
- return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
- }
- var kbuf [1]syscall.Kevent_t
- watchEntry := &kbuf[0]
- syscall.SetKevent(watchEntry, watchfd, syscall.EVFILT_VNODE, syscall.EV_DELETE)
- entryFlags := watchEntry.Flags
- success, errno := syscall.Kevent(w.kq, kbuf[:], nil, nil)
- if success == -1 {
- return os.NewSyscallError("kevent_rm_watch", errno)
- } else if (entryFlags & syscall.EV_ERROR) == syscall.EV_ERROR {
- return errors.New("kevent rm error")
- }
- syscall.Close(watchfd)
- w.wmut.Lock()
- delete(w.watches, name)
- w.wmut.Unlock()
- w.enmut.Lock()
- delete(w.enFlags, name)
- w.enmut.Unlock()
- w.pmut.Lock()
- delete(w.paths, watchfd)
- fInfo := w.finfo[watchfd]
- delete(w.finfo, watchfd)
- w.pmut.Unlock()
-
- // Find all watched paths that are in this directory that are not external.
- if fInfo.IsDir() {
- var pathsToRemove []string
- w.pmut.Lock()
- for _, wpath := range w.paths {
- wdir, _ := filepath.Split(wpath)
- if filepath.Clean(wdir) == filepath.Clean(name) {
- w.ewmut.Lock()
- if !w.externalWatches[wpath] {
- pathsToRemove = append(pathsToRemove, wpath)
- }
- w.ewmut.Unlock()
- }
- }
- w.pmut.Unlock()
- for _, name := range pathsToRemove {
- // Since these are internal, not much sense in propagating error
- // to the user, as that will just confuse them with an error about
- // a path they did not explicitly watch themselves.
- w.Remove(name)
- }
- }
-
- return nil
-}
-
-const (
- // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
- noteAllEvents = syscall.NOTE_DELETE | syscall.NOTE_WRITE | syscall.NOTE_ATTRIB | syscall.NOTE_RENAME
-
- // Block for 100 ms on each call to kevent
- keventWaitTime = 100e6
-)
-
-// addWatch adds path to the watched file set.
-// The flags are interpreted as described in kevent(2).
-func (w *Watcher) addWatch(path string, flags uint32) error {
- path = filepath.Clean(path)
- w.mu.Lock()
- if w.isClosed {
- w.mu.Unlock()
- return errors.New("kevent instance already closed")
- }
- w.mu.Unlock()
-
- watchDir := false
-
- w.wmut.Lock()
- watchfd, found := w.watches[path]
- w.wmut.Unlock()
- if !found {
- fi, errstat := os.Lstat(path)
- if errstat != nil {
- return errstat
- }
-
- // don't watch socket
- if fi.Mode()&os.ModeSocket == os.ModeSocket {
- return nil
- }
-
- // Follow Symlinks
- // Unfortunately, Linux can add bogus symlinks to watch list without
- // issue, and Windows can't do symlinks period (AFAIK). To maintain
- // consistency, we will act like everything is fine. There will simply
- // be no file events for broken symlinks.
- // Hence the returns of nil on errors.
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
- path, err := filepath.EvalSymlinks(path)
- if err != nil {
- return nil
- }
-
- fi, errstat = os.Lstat(path)
- if errstat != nil {
- return nil
- }
- }
-
- fd, errno := syscall.Open(path, openMode, 0700)
- if fd == -1 {
- return os.NewSyscallError("Open", errno)
- }
- watchfd = fd
-
- w.wmut.Lock()
- w.watches[path] = watchfd
- w.wmut.Unlock()
-
- w.pmut.Lock()
- w.paths[watchfd] = path
- w.finfo[watchfd] = fi
- w.pmut.Unlock()
- }
- // Watch the directory if it has not been watched before.
- w.pmut.Lock()
- w.enmut.Lock()
- if w.finfo[watchfd].IsDir() &&
- (flags&syscall.NOTE_WRITE) == syscall.NOTE_WRITE &&
- (!found || (w.enFlags[path]&syscall.NOTE_WRITE) != syscall.NOTE_WRITE) {
- watchDir = true
- }
- w.enmut.Unlock()
- w.pmut.Unlock()
-
- w.enmut.Lock()
- w.enFlags[path] = flags
- w.enmut.Unlock()
-
- var kbuf [1]syscall.Kevent_t
- watchEntry := &kbuf[0]
- watchEntry.Fflags = flags
- syscall.SetKevent(watchEntry, watchfd, syscall.EVFILT_VNODE, syscall.EV_ADD|syscall.EV_CLEAR)
- entryFlags := watchEntry.Flags
- success, errno := syscall.Kevent(w.kq, kbuf[:], nil, nil)
- if success == -1 {
- return errno
- } else if (entryFlags & syscall.EV_ERROR) == syscall.EV_ERROR {
- return errors.New("kevent add error")
- }
-
- if watchDir {
- errdir := w.watchDirectoryFiles(path)
- if errdir != nil {
- return errdir
- }
- }
- return nil
-}
-
-// readEvents reads from the kqueue file descriptor, converts the
-// received events into Event objects and sends them via the Events channel
-func (w *Watcher) readEvents() {
- var (
- keventbuf [10]syscall.Kevent_t // Event buffer
- kevents []syscall.Kevent_t // Received events
- twait *syscall.Timespec // Time to block waiting for events
- n int // Number of events returned from kevent
- errno error // Syscall errno
- )
- kevents = keventbuf[0:0]
- twait = new(syscall.Timespec)
- *twait = syscall.NsecToTimespec(keventWaitTime)
-
- for {
- // See if there is a message on the "done" channel
- var done bool
- select {
- case done = <-w.done:
- default:
- }
-
- // If "done" message is received
- if done {
- errno := syscall.Close(w.kq)
- if errno != nil {
- w.Errors <- os.NewSyscallError("close", errno)
- }
- close(w.Events)
- close(w.Errors)
- return
- }
-
- // Get new events
- if len(kevents) == 0 {
- n, errno = syscall.Kevent(w.kq, nil, keventbuf[:], twait)
-
- // EINTR is okay, basically the syscall was interrupted before
- // timeout expired.
- if errno != nil && errno != syscall.EINTR {
- w.Errors <- os.NewSyscallError("kevent", errno)
- continue
- }
-
- // Received some events
- if n > 0 {
- kevents = keventbuf[0:n]
- }
- }
-
- // Flush the events we received to the Events channel
- for len(kevents) > 0 {
- watchEvent := &kevents[0]
- mask := uint32(watchEvent.Fflags)
- w.pmut.Lock()
- name := w.paths[int(watchEvent.Ident)]
- fileInfo := w.finfo[int(watchEvent.Ident)]
- w.pmut.Unlock()
-
- event := newEvent(name, mask, false)
-
- if fileInfo != nil && fileInfo.IsDir() && !(event.Op&Remove == Remove) {
- // Double check to make sure the directory exist. This can happen when
- // we do a rm -fr on a recursively watched folders and we receive a
- // modification event first but the folder has been deleted and later
- // receive the delete event
- if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
- // mark is as delete event
- event.Op |= Remove
- }
- }
-
- if fileInfo != nil && fileInfo.IsDir() && event.Op&Write == Write && !(event.Op&Remove == Remove) {
- w.sendDirectoryChangeEvents(event.Name)
- } else {
- // Send the event on the Events channel
- w.Events <- event
- }
-
- // Move to next event
- kevents = kevents[1:]
-
- if event.Op&Rename == Rename {
- w.Remove(event.Name)
- w.femut.Lock()
- delete(w.fileExists, event.Name)
- w.femut.Unlock()
- }
- if event.Op&Remove == Remove {
- w.Remove(event.Name)
- w.femut.Lock()
- delete(w.fileExists, event.Name)
- w.femut.Unlock()
-
- // Look for a file that may have overwritten this
- // (ie mv f1 f2 will delete f2 then create f2)
- fileDir, _ := filepath.Split(event.Name)
- fileDir = filepath.Clean(fileDir)
- w.wmut.Lock()
- _, found := w.watches[fileDir]
- w.wmut.Unlock()
- if found {
- // make sure the directory exist before we watch for changes. When we
- // do a recursive watch and perform rm -fr, the parent directory might
- // have gone missing, ignore the missing directory and let the
- // upcoming delete event remove the watch form the parent folder
- if _, err := os.Lstat(fileDir); !os.IsNotExist(err) {
- w.sendDirectoryChangeEvents(fileDir)
- }
- }
- }
- }
- }
-}
-
-// newEvent returns an platform-independent Event based on kqueue Fflags.
-func newEvent(name string, mask uint32, create bool) Event {
- e := Event{Name: name}
- if create {
- e.Op |= Create
- }
- if mask&syscall.NOTE_DELETE == syscall.NOTE_DELETE {
- e.Op |= Remove
- }
- if mask&syscall.NOTE_WRITE == syscall.NOTE_WRITE {
- e.Op |= Write
- }
- if mask&syscall.NOTE_RENAME == syscall.NOTE_RENAME {
- e.Op |= Rename
- }
- if mask&syscall.NOTE_ATTRIB == syscall.NOTE_ATTRIB {
- e.Op |= Chmod
- }
- return e
-}
-
-func (w *Watcher) watchDirectoryFiles(dirPath string) error {
- // Get all files
- files, err := ioutil.ReadDir(dirPath)
- if err != nil {
- return err
- }
-
- // Search for new files
- for _, fileInfo := range files {
- filePath := filepath.Join(dirPath, fileInfo.Name())
-
- if fileInfo.IsDir() == false {
- // Watch file to mimic linux fsnotify
- e := w.addWatch(filePath, noteAllEvents)
- if e != nil {
- return e
- }
- } else {
- // If the user is currently watching directory
- // we want to preserve the flags used
- w.enmut.Lock()
- currFlags, found := w.enFlags[filePath]
- w.enmut.Unlock()
- var newFlags uint32 = syscall.NOTE_DELETE
- if found {
- newFlags |= currFlags
- }
-
- // Linux gives deletes if not explicitly watching
- e := w.addWatch(filePath, newFlags)
- if e != nil {
- return e
- }
- }
- w.femut.Lock()
- w.fileExists[filePath] = true
- w.femut.Unlock()
- }
-
- return nil
-}
-
-// sendDirectoryEvents searches the directory for newly created files
-// and sends them over the event channel. This functionality is to have
-// the BSD version of fsnotify match linux fsnotify which provides a
-// create event for files created in a watched directory.
-func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
- // Get all files
- files, err := ioutil.ReadDir(dirPath)
- if err != nil {
- w.Errors <- err
- }
-
- // Search for new files
- for _, fileInfo := range files {
- filePath := filepath.Join(dirPath, fileInfo.Name())
- w.femut.Lock()
- _, doesExist := w.fileExists[filePath]
- w.femut.Unlock()
- if !doesExist {
- // Send create event (mask=0)
- event := newEvent(filePath, 0, true)
- w.Events <- event
- }
-
- // watchDirectoryFiles (but without doing another ReadDir)
- if fileInfo.IsDir() == false {
- // Watch file to mimic linux fsnotify
- w.addWatch(filePath, noteAllEvents)
- } else {
- // If the user is currently watching directory
- // we want to preserve the flags used
- w.enmut.Lock()
- currFlags, found := w.enFlags[filePath]
- w.enmut.Unlock()
- var newFlags uint32 = syscall.NOTE_DELETE
- if found {
- newFlags |= currFlags
- }
-
- // Linux gives deletes if not explicitly watching
- w.addWatch(filePath, newFlags)
- }
-
- w.femut.Lock()
- w.fileExists[filePath] = true
- w.femut.Unlock()
- }
-}
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/open_mode_bsd.go b/vendor/src/github.com/go-fsnotify/fsnotify/open_mode_bsd.go
deleted file mode 100644
index c57ccb427b..0000000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/open_mode_bsd.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build freebsd openbsd netbsd dragonfly
-
-package fsnotify
-
-import "syscall"
-
-const openMode = syscall.O_NONBLOCK | syscall.O_RDONLY
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/open_mode_darwin.go b/vendor/src/github.com/go-fsnotify/fsnotify/open_mode_darwin.go
deleted file mode 100644
index 174b2c331f..0000000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/open_mode_darwin.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin
-
-package fsnotify
-
-import "syscall"
-
-// note: this constant is not defined on BSD
-const openMode = syscall.O_EVTONLY
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/windows.go b/vendor/src/github.com/go-fsnotify/fsnotify/windows.go
deleted file mode 100644
index 811585227d..0000000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/windows.go
+++ /dev/null
@@ -1,561 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-package fsnotify
-
-import (
- "errors"
- "fmt"
- "os"
- "path/filepath"
- "runtime"
- "sync"
- "syscall"
- "unsafe"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
- Events chan Event
- Errors chan error
- isClosed bool // Set to true when Close() is first called
- mu sync.Mutex // Map access
- port syscall.Handle // Handle to completion port
- watches watchMap // Map of watches (key: i-number)
- input chan *input // Inputs to the reader are sent on this channel
- quit chan chan<- error
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) {
- port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
- if e != nil {
- return nil, os.NewSyscallError("CreateIoCompletionPort", e)
- }
- w := &Watcher{
- port: port,
- watches: make(watchMap),
- input: make(chan *input, 1),
- Events: make(chan Event, 50),
- Errors: make(chan error),
- quit: make(chan chan<- error, 1),
- }
- go w.readEvents()
- return w, nil
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
- if w.isClosed {
- return nil
- }
- w.isClosed = true
-
- // Send "quit" message to the reader goroutine
- ch := make(chan error)
- w.quit <- ch
- if err := w.wakeupReader(); err != nil {
- return err
- }
- return <-ch
-}
-
-// Add starts watching the named file or directory (non-recursively).
-func (w *Watcher) Add(name string) error {
- if w.isClosed {
- return errors.New("watcher already closed")
- }
- in := &input{
- op: opAddWatch,
- path: filepath.Clean(name),
- flags: sys_FS_ALL_EVENTS,
- reply: make(chan error),
- }
- w.input <- in
- if err := w.wakeupReader(); err != nil {
- return err
- }
- return <-in.reply
-}
-
-// Remove stops watching the the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
- in := &input{
- op: opRemoveWatch,
- path: filepath.Clean(name),
- reply: make(chan error),
- }
- w.input <- in
- if err := w.wakeupReader(); err != nil {
- return err
- }
- return <-in.reply
-}
-
-const (
- // Options for AddWatch
- sys_FS_ONESHOT = 0x80000000
- sys_FS_ONLYDIR = 0x1000000
-
- // Events
- sys_FS_ACCESS = 0x1
- sys_FS_ALL_EVENTS = 0xfff
- sys_FS_ATTRIB = 0x4
- sys_FS_CLOSE = 0x18
- sys_FS_CREATE = 0x100
- sys_FS_DELETE = 0x200
- sys_FS_DELETE_SELF = 0x400
- sys_FS_MODIFY = 0x2
- sys_FS_MOVE = 0xc0
- sys_FS_MOVED_FROM = 0x40
- sys_FS_MOVED_TO = 0x80
- sys_FS_MOVE_SELF = 0x800
-
- // Special events
- sys_FS_IGNORED = 0x8000
- sys_FS_Q_OVERFLOW = 0x4000
-)
-
-func newEvent(name string, mask uint32) Event {
- e := Event{Name: name}
- if mask&sys_FS_CREATE == sys_FS_CREATE || mask&sys_FS_MOVED_TO == sys_FS_MOVED_TO {
- e.Op |= Create
- }
- if mask&sys_FS_DELETE == sys_FS_DELETE || mask&sys_FS_DELETE_SELF == sys_FS_DELETE_SELF {
- e.Op |= Remove
- }
- if mask&sys_FS_MODIFY == sys_FS_MODIFY {
- e.Op |= Write
- }
- if mask&sys_FS_MOVE == sys_FS_MOVE || mask&sys_FS_MOVE_SELF == sys_FS_MOVE_SELF || mask&sys_FS_MOVED_FROM == sys_FS_MOVED_FROM {
- e.Op |= Rename
- }
- if mask&sys_FS_ATTRIB == sys_FS_ATTRIB {
- e.Op |= Chmod
- }
- return e
-}
-
-const (
- opAddWatch = iota
- opRemoveWatch
-)
-
-const (
- provisional uint64 = 1 << (32 + iota)
-)
-
-type input struct {
- op int
- path string
- flags uint32
- reply chan error
-}
-
-type inode struct {
- handle syscall.Handle
- volume uint32
- index uint64
-}
-
-type watch struct {
- ov syscall.Overlapped
- ino *inode // i-number
- path string // Directory path
- mask uint64 // Directory itself is being watched with these notify flags
- names map[string]uint64 // Map of names being watched and their notify flags
- rename string // Remembers the old name while renaming a file
- buf [4096]byte
-}
-
-type indexMap map[uint64]*watch
-type watchMap map[uint32]indexMap
-
-func (w *Watcher) wakeupReader() error {
- e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
- if e != nil {
- return os.NewSyscallError("PostQueuedCompletionStatus", e)
- }
- return nil
-}
-
-func getDir(pathname string) (dir string, err error) {
- attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
- if e != nil {
- return "", os.NewSyscallError("GetFileAttributes", e)
- }
- if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
- dir = pathname
- } else {
- dir, _ = filepath.Split(pathname)
- dir = filepath.Clean(dir)
- }
- return
-}
-
-func getIno(path string) (ino *inode, err error) {
- h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
- syscall.FILE_LIST_DIRECTORY,
- syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
- nil, syscall.OPEN_EXISTING,
- syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
- if e != nil {
- return nil, os.NewSyscallError("CreateFile", e)
- }
- var fi syscall.ByHandleFileInformation
- if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
- syscall.CloseHandle(h)
- return nil, os.NewSyscallError("GetFileInformationByHandle", e)
- }
- ino = &inode{
- handle: h,
- volume: fi.VolumeSerialNumber,
- index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
- }
- return ino, nil
-}
-
-// Must run within the I/O thread.
-func (m watchMap) get(ino *inode) *watch {
- if i := m[ino.volume]; i != nil {
- return i[ino.index]
- }
- return nil
-}
-
-// Must run within the I/O thread.
-func (m watchMap) set(ino *inode, watch *watch) {
- i := m[ino.volume]
- if i == nil {
- i = make(indexMap)
- m[ino.volume] = i
- }
- i[ino.index] = watch
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) addWatch(pathname string, flags uint64) error {
- dir, err := getDir(pathname)
- if err != nil {
- return err
- }
- if flags&sys_FS_ONLYDIR != 0 && pathname != dir {
- return nil
- }
- ino, err := getIno(dir)
- if err != nil {
- return err
- }
- w.mu.Lock()
- watchEntry := w.watches.get(ino)
- w.mu.Unlock()
- if watchEntry == nil {
- if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
- syscall.CloseHandle(ino.handle)
- return os.NewSyscallError("CreateIoCompletionPort", e)
- }
- watchEntry = &watch{
- ino: ino,
- path: dir,
- names: make(map[string]uint64),
- }
- w.mu.Lock()
- w.watches.set(ino, watchEntry)
- w.mu.Unlock()
- flags |= provisional
- } else {
- syscall.CloseHandle(ino.handle)
- }
- if pathname == dir {
- watchEntry.mask |= flags
- } else {
- watchEntry.names[filepath.Base(pathname)] |= flags
- }
- if err = w.startRead(watchEntry); err != nil {
- return err
- }
- if pathname == dir {
- watchEntry.mask &= ^provisional
- } else {
- watchEntry.names[filepath.Base(pathname)] &= ^provisional
- }
- return nil
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) remWatch(pathname string) error {
- dir, err := getDir(pathname)
- if err != nil {
- return err
- }
- ino, err := getIno(dir)
- if err != nil {
- return err
- }
- w.mu.Lock()
- watch := w.watches.get(ino)
- w.mu.Unlock()
- if watch == nil {
- return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
- }
- if pathname == dir {
- w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
- watch.mask = 0
- } else {
- name := filepath.Base(pathname)
- w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED)
- delete(watch.names, name)
- }
- return w.startRead(watch)
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) deleteWatch(watch *watch) {
- for name, mask := range watch.names {
- if mask&provisional == 0 {
- w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED)
- }
- delete(watch.names, name)
- }
- if watch.mask != 0 {
- if watch.mask&provisional == 0 {
- w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
- }
- watch.mask = 0
- }
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) startRead(watch *watch) error {
- if e := syscall.CancelIo(watch.ino.handle); e != nil {
- w.Errors <- os.NewSyscallError("CancelIo", e)
- w.deleteWatch(watch)
- }
- mask := toWindowsFlags(watch.mask)
- for _, m := range watch.names {
- mask |= toWindowsFlags(m)
- }
- if mask == 0 {
- if e := syscall.CloseHandle(watch.ino.handle); e != nil {
- w.Errors <- os.NewSyscallError("CloseHandle", e)
- }
- w.mu.Lock()
- delete(w.watches[watch.ino.volume], watch.ino.index)
- w.mu.Unlock()
- return nil
- }
- e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
- uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
- if e != nil {
- err := os.NewSyscallError("ReadDirectoryChanges", e)
- if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
- // Watched directory was probably removed
- if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) {
- if watch.mask&sys_FS_ONESHOT != 0 {
- watch.mask = 0
- }
- }
- err = nil
- }
- w.deleteWatch(watch)
- w.startRead(watch)
- return err
- }
- return nil
-}
-
-// readEvents reads from the I/O completion port, converts the
-// received events into Event objects and sends them via the Events channel.
-// Entry point to the I/O thread.
-func (w *Watcher) readEvents() {
- var (
- n, key uint32
- ov *syscall.Overlapped
- )
- runtime.LockOSThread()
-
- for {
- e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
- watch := (*watch)(unsafe.Pointer(ov))
-
- if watch == nil {
- select {
- case ch := <-w.quit:
- w.mu.Lock()
- var indexes []indexMap
- for _, index := range w.watches {
- indexes = append(indexes, index)
- }
- w.mu.Unlock()
- for _, index := range indexes {
- for _, watch := range index {
- w.deleteWatch(watch)
- w.startRead(watch)
- }
- }
- var err error
- if e := syscall.CloseHandle(w.port); e != nil {
- err = os.NewSyscallError("CloseHandle", e)
- }
- close(w.Events)
- close(w.Errors)
- ch <- err
- return
- case in := <-w.input:
- switch in.op {
- case opAddWatch:
- in.reply <- w.addWatch(in.path, uint64(in.flags))
- case opRemoveWatch:
- in.reply <- w.remWatch(in.path)
- }
- default:
- }
- continue
- }
-
- switch e {
- case syscall.ERROR_MORE_DATA:
- if watch == nil {
- w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
- } else {
- // The i/o succeeded but the buffer is full.
- // In theory we should be building up a full packet.
- // In practice we can get away with just carrying on.
- n = uint32(unsafe.Sizeof(watch.buf))
- }
- case syscall.ERROR_ACCESS_DENIED:
- // Watched directory was probably removed
- w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF)
- w.deleteWatch(watch)
- w.startRead(watch)
- continue
- case syscall.ERROR_OPERATION_ABORTED:
- // CancelIo was called on this handle
- continue
- default:
- w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
- continue
- case nil:
- }
-
- var offset uint32
- for {
- if n == 0 {
- w.Events <- newEvent("", sys_FS_Q_OVERFLOW)
- w.Errors <- errors.New("short read in readEvents()")
- break
- }
-
- // Point "raw" to the event in the buffer
- raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
- buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
- name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
- fullname := watch.path + "\\" + name
-
- var mask uint64
- switch raw.Action {
- case syscall.FILE_ACTION_REMOVED:
- mask = sys_FS_DELETE_SELF
- case syscall.FILE_ACTION_MODIFIED:
- mask = sys_FS_MODIFY
- case syscall.FILE_ACTION_RENAMED_OLD_NAME:
- watch.rename = name
- case syscall.FILE_ACTION_RENAMED_NEW_NAME:
- if watch.names[watch.rename] != 0 {
- watch.names[name] |= watch.names[watch.rename]
- delete(watch.names, watch.rename)
- mask = sys_FS_MOVE_SELF
- }
- }
-
- sendNameEvent := func() {
- if w.sendEvent(fullname, watch.names[name]&mask) {
- if watch.names[name]&sys_FS_ONESHOT != 0 {
- delete(watch.names, name)
- }
- }
- }
- if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
- sendNameEvent()
- }
- if raw.Action == syscall.FILE_ACTION_REMOVED {
- w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED)
- delete(watch.names, name)
- }
- if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
- if watch.mask&sys_FS_ONESHOT != 0 {
- watch.mask = 0
- }
- }
- if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
- fullname = watch.path + "\\" + watch.rename
- sendNameEvent()
- }
-
- // Move to the next event in the buffer
- if raw.NextEntryOffset == 0 {
- break
- }
- offset += raw.NextEntryOffset
-
- // Error!
- if offset >= n {
- w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
- break
- }
- }
-
- if err := w.startRead(watch); err != nil {
- w.Errors <- err
- }
- }
-}
-
-func (w *Watcher) sendEvent(name string, mask uint64) bool {
- if mask == 0 {
- return false
- }
- event := newEvent(name, uint32(mask))
- select {
- case ch := <-w.quit:
- w.quit <- ch
- case w.Events <- event:
- }
- return true
-}
-
-func toWindowsFlags(mask uint64) uint32 {
- var m uint32
- if mask&sys_FS_ACCESS != 0 {
- m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
- }
- if mask&sys_FS_MODIFY != 0 {
- m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
- }
- if mask&sys_FS_ATTRIB != 0 {
- m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
- }
- if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 {
- m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
- }
- return m
-}
-
-func toFSnotifyFlags(action uint32) uint64 {
- switch action {
- case syscall.FILE_ACTION_ADDED:
- return sys_FS_CREATE
- case syscall.FILE_ACTION_REMOVED:
- return sys_FS_DELETE
- case syscall.FILE_ACTION_MODIFIED:
- return sys_FS_MODIFY
- case syscall.FILE_ACTION_RENAMED_OLD_NAME:
- return sys_FS_MOVED_FROM
- case syscall.FILE_ACTION_RENAMED_NEW_NAME:
- return sys_FS_MOVED_TO
- }
- return 0
-}
diff --git a/vendor/src/github.com/kr/pty/ztypes_arm64.go b/vendor/src/github.com/kr/pty/ztypes_arm64.go
new file mode 100644
index 0000000000..6c29a4b918
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/ztypes_arm64.go
@@ -0,0 +1,11 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+// +build arm64
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/src/github.com/mistifyio/go-zfs/.gitignore b/vendor/src/github.com/mistifyio/go-zfs/.gitignore
new file mode 100644
index 0000000000..8000dd9db4
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/.gitignore
@@ -0,0 +1 @@
+.vagrant
diff --git a/vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md b/vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md
new file mode 100644
index 0000000000..66aab8e359
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md
@@ -0,0 +1,51 @@
+## How to Contribute ##
+
+We always welcome contributions to help make `go-zfs` better. Please take a moment to read this document if you would like to contribute.
+
+### Reporting issues ###
+
+We use [Github issues](https://github.com/mistifyio/go-zfs/issues) to track bug reports, feature requests, and submitting pull requests.
+
+If you find a bug:
+
+* Use the GitHub issue search to check whether the bug has already been reported.
+* If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository.
+* If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. Also provide the steps taken to reproduce the bug.
+
+### Pull requests ###
+
+We welcome bug fixes, improvements, and new features. Before embarking on making significant changes, please open an issue and ask first so that you do not risk duplicating efforts or spending time working on something that may be out of scope. For minor items, just open a pull request.
+
+[Fork the project](https://help.github.com/articles/fork-a-repo), clone your fork, and add the upstream to your remote:
+
+ $ git clone git@github.com:/go-zfs.git
+ $ cd go-zfs
+ $ git remote add upstream https://github.com/mistifyio/go-zfs.git
+
+If you need to pull new changes committed upstream:
+
+ $ git checkout master
+ $ git fetch upstream
+ $ git merge upstream/master
+
+Don't work directly on master as this makes it harder to merge later. Create a feature branch for your fix or new feature:
+
+ $ git checkout -b
+
+Please try to commit your changes in logical chunks. Ideally, you should include the issue number in the commit message.
+
+ $ git commit -m "Issue # - "
+
+Push your feature branch to your fork.
+
+ $ git push origin
+
+[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch. Please give your pull request a clear title and description and note which issue(s) your pull request fixes.
+
+* All Go code should be formatted using [gofmt](http://golang.org/cmd/gofmt/).
+* Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing).
+
+**Important:** By submitting a patch, you agree to allow the project owners to license your work under the [Apache 2.0 License](./LICENSE).
+
+----
+Guidelines based on http://azkaban.github.io/contributing.html
diff --git a/vendor/src/github.com/mistifyio/go-zfs/LICENSE b/vendor/src/github.com/mistifyio/go-zfs/LICENSE
new file mode 100644
index 0000000000..f4c265cfec
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright (c) 2014, OmniTI Computer Consulting, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/vendor/src/github.com/mistifyio/go-zfs/README.md b/vendor/src/github.com/mistifyio/go-zfs/README.md
new file mode 100644
index 0000000000..2515e588e0
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/README.md
@@ -0,0 +1,54 @@
+# Go Wrapper for ZFS #
+
+Simple wrappers for ZFS command line tools.
+
+[](https://godoc.org/github.com/mistifyio/go-zfs)
+
+## Requirements ##
+
+You need a working ZFS setup. To use on Ubuntu 14.04, setup ZFS:
+
+ sudo apt-get install python-software-properties
+ sudo apt-add-repository ppa:zfs-native/stable
+ sudo apt-get update
+ sudo apt-get install ubuntu-zfs libzfs-dev
+
+Developed using Go 1.3, but currently there isn't anything 1.3 specific. Don't use Ubuntu packages for Go, use http://golang.org/doc/install
+
+Generally you need root privileges to use anything zfs related.
+
+## Status ##
+
+This has only been tested on Ubuntu 14.04
+
+In the future, we hope to work directly with libzfs.
+
+# Hacking #
+
+The tests have decent examples for most functions.
+
+```go
+//assuming a zpool named test
+//error handling omitted
+
+
+f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+ok(t, err)
+
+s, err := f.Snapshot("test", nil)
+ok(t, err)
+
+// snapshot is named "test/snapshot-test@test"
+
+c, err := s.Clone("test/clone-test", nil)
+
+err := c.Destroy()
+err := s.Destroy()
+err := f.Destroy()
+
+```
+
+# Contributing #
+
+See the [contributing guidelines](./CONTRIBUTING.md)
+
diff --git a/vendor/src/github.com/mistifyio/go-zfs/error.go b/vendor/src/github.com/mistifyio/go-zfs/error.go
new file mode 100644
index 0000000000..5408ccdb55
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/error.go
@@ -0,0 +1,18 @@
+package zfs
+
+import (
+ "fmt"
+)
+
+// Error is an error which is returned when the `zfs` or `zpool` shell
+// commands return with a non-zero exit code.
+type Error struct {
+ Err error
+ Debug string
+ Stderr string
+}
+
+// Error returns the string representation of an Error.
+func (e Error) Error() string {
+ return fmt.Sprintf("%s: %q => %s", e.Err, e.Debug, e.Stderr)
+}
diff --git a/vendor/src/github.com/mistifyio/go-zfs/error_test.go b/vendor/src/github.com/mistifyio/go-zfs/error_test.go
new file mode 100644
index 0000000000..323980ec6d
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/error_test.go
@@ -0,0 +1,37 @@
+package zfs
+
+import (
+ "errors"
+ "fmt"
+ "testing"
+)
+
+func TestError(t *testing.T) {
+ var tests = []struct {
+ err error
+ debug string
+ stderr string
+ }{
+ // Empty error
+ {nil, "", ""},
+ // Typical error
+ {errors.New("exit status foo"), "/sbin/foo bar qux", "command not found"},
+ // Quoted error
+ {errors.New("exit status quoted"), "\"/sbin/foo\" bar qux", "\"some\" 'random' `quotes`"},
+ }
+
+ for _, test := range tests {
+ // Generate error from tests
+ zErr := Error{
+ Err: test.err,
+ Debug: test.debug,
+ Stderr: test.stderr,
+ }
+
+ // Verify output format is consistent, so that any changes to the
+ // Error method must be reflected by the test
+ if str := zErr.Error(); str != fmt.Sprintf("%s: %q => %s", test.err, test.debug, test.stderr) {
+ t.Fatalf("unexpected Error string: %v", str)
+ }
+ }
+}
diff --git a/vendor/src/github.com/mistifyio/go-zfs/utils.go b/vendor/src/github.com/mistifyio/go-zfs/utils.go
new file mode 100644
index 0000000000..250bd5b31c
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/utils.go
@@ -0,0 +1,320 @@
+package zfs
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os/exec"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type command struct {
+ Command string
+ Stdin io.Reader
+ Stdout io.Writer
+}
+
+func (c *command) Run(arg ...string) ([][]string, error) {
+
+ cmd := exec.Command(c.Command, arg...)
+
+ var stdout, stderr bytes.Buffer
+
+ if c.Stdout == nil {
+ cmd.Stdout = &stdout
+ } else {
+ cmd.Stdout = c.Stdout
+ }
+
+ if c.Stdin != nil {
+ cmd.Stdin = c.Stdin
+
+ }
+ cmd.Stderr = &stderr
+
+ debug := strings.Join([]string{cmd.Path, strings.Join(cmd.Args, " ")}, " ")
+ if logger != nil {
+ logger.Log(cmd.Args)
+ }
+ err := cmd.Run()
+
+ if err != nil {
+ return nil, &Error{
+ Err: err,
+ Debug: debug,
+ Stderr: stderr.String(),
+ }
+ }
+
+ // assume if you passed in something for stdout, that you know what to do with it
+ if c.Stdout != nil {
+ return nil, nil
+ }
+
+ lines := strings.Split(stdout.String(), "\n")
+
+ //last line is always blank
+ lines = lines[0 : len(lines)-1]
+ output := make([][]string, len(lines))
+
+ for i, l := range lines {
+ output[i] = strings.Fields(l)
+ }
+
+ return output, nil
+}
+
+func setString(field *string, value string) {
+ v := ""
+ if value != "-" {
+ v = value
+ }
+ *field = v
+}
+
+func setUint(field *uint64, value string) error {
+ var v uint64
+ if value != "-" {
+ var err error
+ v, err = strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ }
+ *field = v
+ return nil
+}
+
+func (ds *Dataset) parseLine(line []string) error {
+ prop := line[1]
+ val := line[2]
+
+ var err error
+
+ switch prop {
+ case "available":
+ err = setUint(&ds.Avail, val)
+ case "compression":
+ setString(&ds.Compression, val)
+ case "mountpoint":
+ setString(&ds.Mountpoint, val)
+ case "quota":
+ err = setUint(&ds.Quota, val)
+ case "type":
+ setString(&ds.Type, val)
+ case "origin":
+ setString(&ds.Origin, val)
+ case "used":
+ err = setUint(&ds.Used, val)
+ case "volsize":
+ err = setUint(&ds.Volsize, val)
+ case "written":
+ err = setUint(&ds.Written, val)
+ case "logicalused":
+ err = setUint(&ds.Logicalused, val)
+ }
+ return err
+}
+
+/*
+ * from zfs diff's escape function:
+ *
+ * Prints a file name out a character at a time. If the character is
+ * not in the range of what we consider "printable" ASCII, display it
+ * as an escaped 3-digit octal value. ASCII values less than a space
+ * are all control characters and we declare the upper end as the
+ * DELete character. This also is the last 7-bit ASCII character.
+ * We choose to treat all 8-bit ASCII as not printable for this
+ * application.
+ */
+func unescapeFilepath(path string) (string, error) {
+ buf := make([]byte, 0, len(path))
+ llen := len(path)
+ for i := 0; i < llen; {
+ if path[i] == '\\' {
+ if llen < i+4 {
+ return "", fmt.Errorf("Invalid octal code: too short")
+ }
+ octalCode := path[(i + 1):(i + 4)]
+ val, err := strconv.ParseUint(octalCode, 8, 8)
+ if err != nil {
+ return "", fmt.Errorf("Invalid octal code: %v", err)
+ }
+ buf = append(buf, byte(val))
+ i += 4
+ } else {
+ buf = append(buf, path[i])
+ i++
+ }
+ }
+ return string(buf), nil
+}
+
+var changeTypeMap = map[string]ChangeType{
+ "-": Removed,
+ "+": Created,
+ "M": Modified,
+ "R": Renamed,
+}
+var inodeTypeMap = map[string]InodeType{
+ "B": BlockDevice,
+ "C": CharacterDevice,
+ "/": Directory,
+ ">": Door,
+ "|": NamedPipe,
+ "@": SymbolicLink,
+ "P": EventPort,
+ "=": Socket,
+ "F": File,
+}
+
+// matches (+1) or (-1)
+var referenceCountRegex = regexp.MustCompile("\\(([+-]\\d+?)\\)")
+
+func parseReferenceCount(field string) (int, error) {
+ matches := referenceCountRegex.FindStringSubmatch(field)
+ if matches == nil {
+ return 0, fmt.Errorf("Regexp does not match")
+ }
+ return strconv.Atoi(matches[1])
+}
+
+func parseInodeChange(line []string) (*InodeChange, error) {
+ llen := len(line)
+ if llen < 1 {
+ return nil, fmt.Errorf("Empty line passed")
+ }
+
+ changeType := changeTypeMap[line[0]]
+ if changeType == 0 {
+ return nil, fmt.Errorf("Unknown change type '%s'", line[0])
+ }
+
+ switch changeType {
+ case Renamed:
+ if llen != 4 {
+ return nil, fmt.Errorf("Mismatching number of fields: expect 4, got: %d", llen)
+ }
+ case Modified:
+ if llen != 4 && llen != 3 {
+ return nil, fmt.Errorf("Mismatching number of fields: expect 3..4, got: %d", llen)
+ }
+ default:
+ if llen != 3 {
+ return nil, fmt.Errorf("Mismatching number of fields: expect 3, got: %d", llen)
+ }
+ }
+
+ inodeType := inodeTypeMap[line[1]]
+ if inodeType == 0 {
+ return nil, fmt.Errorf("Unknown inode type '%s'", line[1])
+ }
+
+ path, err := unescapeFilepath(line[2])
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse filename: %v", err)
+ }
+
+ var newPath string
+ var referenceCount int
+ switch changeType {
+ case Renamed:
+ newPath, err = unescapeFilepath(line[3])
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse filename: %v", err)
+ }
+ case Modified:
+ if llen == 4 {
+ referenceCount, err = parseReferenceCount(line[3])
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse reference count: %v", err)
+ }
+ }
+ default:
+ newPath = ""
+ }
+
+ return &InodeChange{
+ Change: changeType,
+ Type: inodeType,
+ Path: path,
+ NewPath: newPath,
+ ReferenceCountChange: referenceCount,
+ }, nil
+}
+
+// example input
+//M / /testpool/bar/
+//+ F /testpool/bar/hello.txt
+//M / /testpool/bar/hello.txt (+1)
+//M / /testpool/bar/hello-hardlink
+func parseInodeChanges(lines [][]string) ([]*InodeChange, error) {
+ changes := make([]*InodeChange, len(lines))
+
+ for i, line := range lines {
+ c, err := parseInodeChange(line)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse line %d of zfs diff: %v, got: '%s'", i, err, line)
+ }
+ changes[i] = c
+ }
+ return changes, nil
+}
+
+func listByType(t, filter string) ([]*Dataset, error) {
+ args := []string{"get", "all", "-t", t, "-rHp"}
+ if filter != "" {
+ args = append(args, filter)
+ }
+ out, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ var datasets []*Dataset
+
+ name := ""
+ var ds *Dataset
+ for _, line := range out {
+ if name != line[0] {
+ name = line[0]
+ ds = &Dataset{Name: name}
+ datasets = append(datasets, ds)
+ }
+ if err := ds.parseLine(line); err != nil {
+ return nil, err
+ }
+ }
+
+ return datasets, nil
+}
+
+func propsSlice(properties map[string]string) []string {
+ args := make([]string, 0, len(properties)*3)
+ for k, v := range properties {
+ args = append(args, "-o")
+ args = append(args, fmt.Sprintf("%s=%s", k, v))
+ }
+ return args
+}
+
+func (z *Zpool) parseLine(line []string) error {
+ prop := line[1]
+ val := line[2]
+
+ var err error
+
+ switch prop {
+ case "health":
+ setString(&z.Health, val)
+ case "allocated":
+ err = setUint(&z.Allocated, val)
+ case "size":
+ err = setUint(&z.Size, val)
+ case "free":
+ err = setUint(&z.Free, val)
+ }
+ return err
+}
diff --git a/vendor/src/github.com/mistifyio/go-zfs/zfs.go b/vendor/src/github.com/mistifyio/go-zfs/zfs.go
new file mode 100644
index 0000000000..f43bea292e
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/zfs.go
@@ -0,0 +1,382 @@
+// Package zfs provides wrappers around the ZFS command line tools.
+package zfs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// ZFS dataset types, which can indicate if a dataset is a filesystem,
+// snapshot, or volume.
+const (
+ DatasetFilesystem = "filesystem"
+ DatasetSnapshot = "snapshot"
+ DatasetVolume = "volume"
+)
+
+// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot,
+// or volume. The Type struct member can be used to determine a dataset's type.
+//
+// The field definitions can be found in the ZFS manual:
+// http://www.freebsd.org/cgi/man.cgi?zfs(8).
+type Dataset struct {
+ Name string
+ Origin string
+ Used uint64
+ Avail uint64
+ Mountpoint string
+ Compression string
+ Type string
+ Written uint64
+ Volsize uint64
+ Usedbydataset uint64
+ Logicalused uint64
+ Quota uint64
+}
+
+// InodeType is the type of inode as reported by Diff
+type InodeType int
+
+// Types of Inodes
+const (
+ _ = iota // 0 == unknown type
+ BlockDevice InodeType = iota
+ CharacterDevice
+ Directory
+ Door
+ NamedPipe
+ SymbolicLink
+ EventPort
+ Socket
+ File
+)
+
+// ChangeType is the type of inode change as reported by Diff
+type ChangeType int
+
+// Types of Changes
+const (
+ _ = iota // 0 == unknown type
+ Removed ChangeType = iota
+ Created
+ Modified
+ Renamed
+)
+
+// DestroyFlag is the options flag passed to Destroy
+type DestroyFlag int
+
+// Valid destroy options
+const (
+ DestroyDefault DestroyFlag = 1 << iota
+ DestroyRecursive = 1 << iota
+ DestroyRecursiveClones = 1 << iota
+ DestroyDeferDeletion = 1 << iota
+ DestroyForceUmount = 1 << iota
+)
+
+// InodeChange represents a change as reported by Diff
+type InodeChange struct {
+ Change ChangeType
+ Type InodeType
+ Path string
+ NewPath string
+ ReferenceCountChange int
+}
+
+// Logger can be used to log commands/actions
+type Logger interface {
+ Log(cmd []string)
+}
+
+var logger Logger
+
+// SetLogger set a log handler to log all commands including arguments before
+// they are executed
+func SetLogger(l Logger) {
+ logger = l
+}
+
+// zfs is a helper function to wrap typical calls to zfs.
+func zfs(arg ...string) ([][]string, error) {
+ c := command{Command: "zfs"}
+ return c.Run(arg...)
+}
+
+// Datasets returns a slice of ZFS datasets, regardless of type.
+// A filter argument may be passed to select a dataset with the matching name,
+// or empty string ("") may be used to select all datasets.
+func Datasets(filter string) ([]*Dataset, error) {
+ return listByType("all", filter)
+}
+
+// Snapshots returns a slice of ZFS snapshots.
+// A filter argument may be passed to select a snapshot with the matching name,
+// or empty string ("") may be used to select all snapshots.
+func Snapshots(filter string) ([]*Dataset, error) {
+ return listByType(DatasetSnapshot, filter)
+}
+
+// Filesystems returns a slice of ZFS filesystems.
+// A filter argument may be passed to select a filesystem with the matching name,
+// or empty string ("") may be used to select all filesystems.
+func Filesystems(filter string) ([]*Dataset, error) {
+ return listByType(DatasetFilesystem, filter)
+}
+
+// Volumes returns a slice of ZFS volumes.
+// A filter argument may be passed to select a volume with the matching name,
+// or empty string ("") may be used to select all volumes.
+func Volumes(filter string) ([]*Dataset, error) {
+ return listByType(DatasetVolume, filter)
+}
+
+// GetDataset retrieves a single ZFS dataset by name. This dataset could be
+// any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume.
+func GetDataset(name string) (*Dataset, error) {
+ out, err := zfs("get", "all", "-Hp", name)
+ if err != nil {
+ return nil, err
+ }
+
+ ds := &Dataset{Name: name}
+ for _, line := range out {
+ if err := ds.parseLine(line); err != nil {
+ return nil, err
+ }
+ }
+
+ return ds, nil
+}
+
+// Clone clones a ZFS snapshot and returns a clone dataset.
+// An error will be returned if the input dataset is not of snapshot type.
+func (d *Dataset) Clone(dest string, properties map[string]string) (*Dataset, error) {
+ if d.Type != DatasetSnapshot {
+ return nil, errors.New("can only clone snapshots")
+ }
+ args := make([]string, 2, 4)
+ args[0] = "clone"
+ args[1] = "-p"
+ if properties != nil {
+ args = append(args, propsSlice(properties)...)
+ }
+ args = append(args, []string{d.Name, dest}...)
+ _, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(dest)
+}
+
+// ReceiveSnapshot receives a ZFS stream from the input io.Reader, creates a
+// new snapshot with the specified name, and streams the input data into the
+// newly-created snapshot.
+func ReceiveSnapshot(input io.Reader, name string) (*Dataset, error) {
+ c := command{Command: "zfs", Stdin: input}
+ _, err := c.Run("receive", name)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(name)
+}
+
+// SendSnapshot sends a ZFS stream of a snapshot to the input io.Writer.
+// An error will be returned if the input dataset is not of snapshot type.
+func (d *Dataset) SendSnapshot(output io.Writer) error {
+ if d.Type != DatasetSnapshot {
+ return errors.New("can only send snapshots")
+ }
+
+ c := command{Command: "zfs", Stdout: output}
+ _, err := c.Run("send", d.Name)
+ return err
+}
+
+// CreateVolume creates a new ZFS volume with the specified name, size, and
+// properties.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func CreateVolume(name string, size uint64, properties map[string]string) (*Dataset, error) {
+ args := make([]string, 4, 5)
+ args[0] = "create"
+ args[1] = "-p"
+ args[2] = "-V"
+ args[3] = strconv.FormatUint(size, 10)
+ if properties != nil {
+ args = append(args, propsSlice(properties)...)
+ }
+ args = append(args, name)
+ _, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(name)
+}
+
+// Destroy destroys a ZFS dataset. If the destroy bit flag is set, any
+// descendents of the dataset will be recursively destroyed, including snapshots.
+// If the deferred bit flag is set, the snapshot is marked for deferred
+// deletion.
+func (d *Dataset) Destroy(flags DestroyFlag) error {
+ args := make([]string, 1, 3)
+ args[0] = "destroy"
+ if flags&DestroyRecursive != 0 {
+ args = append(args, "-r")
+ }
+
+ if flags&DestroyRecursiveClones != 0 {
+ args = append(args, "-R")
+ }
+
+ if flags&DestroyDeferDeletion != 0 {
+ args = append(args, "-d")
+ }
+
+ if flags&DestroyForceUmount != 0 {
+ args = append(args, "-f")
+ }
+
+ args = append(args, d.Name)
+ _, err := zfs(args...)
+ return err
+}
+
+// SetProperty sets a ZFS property on the receiving dataset.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func (d *Dataset) SetProperty(key, val string) error {
+ prop := strings.Join([]string{key, val}, "=")
+ _, err := zfs("set", prop, d.Name)
+ return err
+}
+
+// GetProperty returns the current value of a ZFS property from the
+// receiving dataset.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func (d *Dataset) GetProperty(key string) (string, error) {
+ out, err := zfs("get", key, d.Name)
+ if err != nil {
+ return "", err
+ }
+
+ return out[0][2], nil
+}
+
+// Snapshots returns a slice of all ZFS snapshots of a given dataset.
+func (d *Dataset) Snapshots() ([]*Dataset, error) {
+ return Snapshots(d.Name)
+}
+
+// CreateFilesystem creates a new ZFS filesystem with the specified name and
+// properties.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) {
+ args := make([]string, 1, 4)
+ args[0] = "create"
+
+ if properties != nil {
+ args = append(args, propsSlice(properties)...)
+ }
+
+ args = append(args, name)
+ _, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(name)
+}
+
+// Snapshot creates a new ZFS snapshot of the receiving dataset, using the
+// specified name. Optionally, the snapshot can be taken recursively, creating
+// snapshots of all descendent filesystems in a single, atomic operation.
+func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) {
+ args := make([]string, 1, 4)
+ args[0] = "snapshot"
+ if recursive {
+ args = append(args, "-r")
+ }
+ snapName := fmt.Sprintf("%s@%s", d.Name, name)
+ args = append(args, snapName)
+ _, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ return GetDataset(snapName)
+}
+
+// Rollback rolls back the receiving ZFS dataset to a previous snapshot.
+// Optionally, intermediate snapshots can be destroyed. A ZFS snapshot
+// rollback cannot be completed without this option, if more recent
+// snapshots exist.
+// An error will be returned if the input dataset is not of snapshot type.
+func (d *Dataset) Rollback(destroyMoreRecent bool) error {
+ if d.Type != DatasetSnapshot {
+ return errors.New("can only rollback snapshots")
+ }
+
+ args := make([]string, 1, 3)
+ args[0] = "rollback"
+ if destroyMoreRecent {
+ args = append(args, "-r")
+ }
+ args = append(args, d.Name)
+
+ _, err := zfs(args...)
+ return err
+}
+
+// Children returns a slice of children of the receiving ZFS dataset.
+// A recursion depth may be specified, or a depth of 0 allows unlimited
+// recursion.
+func (d *Dataset) Children(depth uint64) ([]*Dataset, error) {
+ args := []string{"get", "all", "-t", "all", "-Hp"}
+ if depth > 0 {
+ args = append(args, "-d")
+ args = append(args, strconv.FormatUint(depth, 10))
+ } else {
+ args = append(args, "-r")
+ }
+ args = append(args, d.Name)
+
+ out, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ var datasets []*Dataset
+ name := ""
+ var ds *Dataset
+ for _, line := range out {
+ if name != line[0] {
+ name = line[0]
+ ds = &Dataset{Name: name}
+ datasets = append(datasets, ds)
+ }
+ if err := ds.parseLine(line); err != nil {
+ return nil, err
+ }
+ }
+ return datasets[1:], nil
+}
+
+// Diff returns changes between a snapshot and the given ZFS dataset.
+// The snapshot name must include the filesystem part as it is possible to
+// compare clones with their origin snapshots.
+func (d *Dataset) Diff(snapshot string) ([]*InodeChange, error) {
+ args := []string{"diff", "-FH", snapshot, d.Name}[:]
+ out, err := zfs(args...)
+ if err != nil {
+ return nil, err
+ }
+ inodeChanges, err := parseInodeChanges(out)
+ if err != nil {
+ return nil, err
+ }
+ return inodeChanges, nil
+}
diff --git a/vendor/src/github.com/mistifyio/go-zfs/zfs_test.go b/vendor/src/github.com/mistifyio/go-zfs/zfs_test.go
new file mode 100644
index 0000000000..e991a5cffc
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/zfs_test.go
@@ -0,0 +1,357 @@
+package zfs_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "math"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "testing"
+ "time"
+
+ "github.com/mistifyio/go-zfs"
+)
+
+func sleep(delay int) {
+ time.Sleep(time.Duration(delay) * time.Second)
+}
+
+func pow2(x int) int64 {
+ return int64(math.Pow(2, float64(x)))
+}
+
+//https://github.com/benbjohnson/testing
+// assert fails the test if the condition is false.
+func assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
+ if !condition {
+ _, file, line, _ := runtime.Caller(1)
+ fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...)
+ tb.FailNow()
+ }
+}
+
+// ok fails the test if an err is not nil.
+func ok(tb testing.TB, err error) {
+ if err != nil {
+ _, file, line, _ := runtime.Caller(1)
+ fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error())
+ tb.FailNow()
+ }
+}
+
+// equals fails the test if exp is not equal to act.
+func equals(tb testing.TB, exp, act interface{}) {
+ if !reflect.DeepEqual(exp, act) {
+ _, file, line, _ := runtime.Caller(1)
+ fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
+ tb.FailNow()
+ }
+}
+
+func zpoolTest(t *testing.T, fn func()) {
+ tempfiles := make([]string, 3)
+ for i := range tempfiles {
+ f, _ := ioutil.TempFile("/tmp/", "zfs-")
+ defer f.Close()
+ err := f.Truncate(pow2(30))
+ ok(t, err)
+ tempfiles[i] = f.Name()
+ defer os.Remove(f.Name())
+ }
+
+ pool, err := zfs.CreateZpool("test", nil, tempfiles...)
+ ok(t, err)
+ defer pool.Destroy()
+ ok(t, err)
+ fn()
+
+}
+
+func TestDatasets(t *testing.T) {
+ zpoolTest(t, func() {
+ _, err := zfs.Datasets("")
+ ok(t, err)
+
+ ds, err := zfs.GetDataset("test")
+ ok(t, err)
+ equals(t, zfs.DatasetFilesystem, ds.Type)
+ equals(t, "", ds.Origin)
+ assert(t, ds.Logicalused > 0, "Logicalused is not greater than 0")
+ })
+}
+
+func TestSnapshots(t *testing.T) {
+
+ zpoolTest(t, func() {
+ snapshots, err := zfs.Snapshots("")
+ ok(t, err)
+
+ for _, snapshot := range snapshots {
+ equals(t, zfs.DatasetSnapshot, snapshot.Type)
+ }
+ })
+}
+
+func TestFilesystems(t *testing.T) {
+ zpoolTest(t, func() {
+ f, err := zfs.CreateFilesystem("test/filesystem-test", nil)
+ ok(t, err)
+
+ filesystems, err := zfs.Filesystems("")
+ ok(t, err)
+
+ for _, filesystem := range filesystems {
+ equals(t, zfs.DatasetFilesystem, filesystem.Type)
+ }
+
+ ok(t, f.Destroy(zfs.DestroyDefault))
+ })
+}
+
+func TestCreateFilesystemWithProperties(t *testing.T) {
+ zpoolTest(t, func() {
+ props := map[string]string{
+ "compression": "lz4",
+ }
+
+ f, err := zfs.CreateFilesystem("test/filesystem-test", props)
+ ok(t, err)
+
+ equals(t, "lz4", f.Compression)
+
+ filesystems, err := zfs.Filesystems("")
+ ok(t, err)
+
+ for _, filesystem := range filesystems {
+ equals(t, zfs.DatasetFilesystem, filesystem.Type)
+ }
+
+ ok(t, f.Destroy(zfs.DestroyDefault))
+ })
+}
+
+func TestVolumes(t *testing.T) {
+ zpoolTest(t, func() {
+ v, err := zfs.CreateVolume("test/volume-test", uint64(pow2(23)), nil)
+ ok(t, err)
+
+ // volumes are sometimes "busy" if you try to manipulate them right away
+ sleep(1)
+
+ equals(t, zfs.DatasetVolume, v.Type)
+ volumes, err := zfs.Volumes("")
+ ok(t, err)
+
+ for _, volume := range volumes {
+ equals(t, zfs.DatasetVolume, volume.Type)
+ }
+
+ ok(t, v.Destroy(zfs.DestroyDefault))
+ })
+}
+
+func TestSnapshot(t *testing.T) {
+ zpoolTest(t, func() {
+ f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+ ok(t, err)
+
+ filesystems, err := zfs.Filesystems("")
+ ok(t, err)
+
+ for _, filesystem := range filesystems {
+ equals(t, zfs.DatasetFilesystem, filesystem.Type)
+ }
+
+ s, err := f.Snapshot("test", false)
+ ok(t, err)
+
+ equals(t, zfs.DatasetSnapshot, s.Type)
+
+ equals(t, "test/snapshot-test@test", s.Name)
+
+ ok(t, s.Destroy(zfs.DestroyDefault))
+
+ ok(t, f.Destroy(zfs.DestroyDefault))
+ })
+}
+
+func TestClone(t *testing.T) {
+ zpoolTest(t, func() {
+ f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+ ok(t, err)
+
+ filesystems, err := zfs.Filesystems("")
+ ok(t, err)
+
+ for _, filesystem := range filesystems {
+ equals(t, zfs.DatasetFilesystem, filesystem.Type)
+ }
+
+ s, err := f.Snapshot("test", false)
+ ok(t, err)
+
+ equals(t, zfs.DatasetSnapshot, s.Type)
+ equals(t, "test/snapshot-test@test", s.Name)
+
+ c, err := s.Clone("test/clone-test", nil)
+ ok(t, err)
+
+ equals(t, zfs.DatasetFilesystem, c.Type)
+
+ ok(t, c.Destroy(zfs.DestroyDefault))
+
+ ok(t, s.Destroy(zfs.DestroyDefault))
+
+ ok(t, f.Destroy(zfs.DestroyDefault))
+ })
+}
+
+func TestSendSnapshot(t *testing.T) {
+ zpoolTest(t, func() {
+ f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+ ok(t, err)
+
+ filesystems, err := zfs.Filesystems("")
+ ok(t, err)
+
+ for _, filesystem := range filesystems {
+ equals(t, zfs.DatasetFilesystem, filesystem.Type)
+ }
+
+ s, err := f.Snapshot("test", false)
+ ok(t, err)
+
+ file, _ := ioutil.TempFile("/tmp/", "zfs-")
+ defer file.Close()
+ err = file.Truncate(pow2(30))
+ ok(t, err)
+ defer os.Remove(file.Name())
+
+ err = s.SendSnapshot(file)
+ ok(t, err)
+
+ ok(t, s.Destroy(zfs.DestroyDefault))
+
+ ok(t, f.Destroy(zfs.DestroyDefault))
+ })
+}
+
+func TestChildren(t *testing.T) {
+ zpoolTest(t, func() {
+ f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+ ok(t, err)
+
+ s, err := f.Snapshot("test", false)
+ ok(t, err)
+
+ equals(t, zfs.DatasetSnapshot, s.Type)
+ equals(t, "test/snapshot-test@test", s.Name)
+
+ children, err := f.Children(0)
+ ok(t, err)
+
+ equals(t, 1, len(children))
+ equals(t, "test/snapshot-test@test", children[0].Name)
+
+ ok(t, s.Destroy(zfs.DestroyDefault))
+ ok(t, f.Destroy(zfs.DestroyDefault))
+ })
+}
+
+func TestListZpool(t *testing.T) {
+ zpoolTest(t, func() {
+ _, err := zfs.ListZpools()
+ ok(t, err)
+ })
+}
+
+func TestRollback(t *testing.T) {
+ zpoolTest(t, func() {
+ f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+ ok(t, err)
+
+ filesystems, err := zfs.Filesystems("")
+ ok(t, err)
+
+ for _, filesystem := range filesystems {
+ equals(t, zfs.DatasetFilesystem, filesystem.Type)
+ }
+
+ s1, err := f.Snapshot("test", false)
+ ok(t, err)
+
+ _, err = f.Snapshot("test2", false)
+ ok(t, err)
+
+ s3, err := f.Snapshot("test3", false)
+ ok(t, err)
+
+ err = s3.Rollback(false)
+ ok(t, err)
+
+ err = s1.Rollback(false)
+		assert(t, err != nil, "should error when rolling back beyond most recent without destroyMoreRecent = true")
+
+ err = s1.Rollback(true)
+ ok(t, err)
+
+ ok(t, s1.Destroy(zfs.DestroyDefault))
+
+ ok(t, f.Destroy(zfs.DestroyDefault))
+ })
+}
+
+func TestDiff(t *testing.T) {
+ zpoolTest(t, func() {
+ fs, err := zfs.CreateFilesystem("test/origin", nil)
+ ok(t, err)
+
+ linkedFile, err := os.Create(filepath.Join(fs.Mountpoint, "linked"))
+ ok(t, err)
+
+ movedFile, err := os.Create(filepath.Join(fs.Mountpoint, "file"))
+ ok(t, err)
+
+ snapshot, err := fs.Snapshot("snapshot", false)
+ ok(t, err)
+
+ unicodeFile, err := os.Create(filepath.Join(fs.Mountpoint, "i ❤ unicode"))
+ ok(t, err)
+
+ err = os.Rename(movedFile.Name(), movedFile.Name()+"-new")
+ ok(t, err)
+
+ err = os.Link(linkedFile.Name(), linkedFile.Name()+"_hard")
+ ok(t, err)
+
+ inodeChanges, err := fs.Diff(snapshot.Name)
+ ok(t, err)
+ equals(t, 4, len(inodeChanges))
+
+ equals(t, "/test/origin/", inodeChanges[0].Path)
+ equals(t, zfs.Directory, inodeChanges[0].Type)
+ equals(t, zfs.Modified, inodeChanges[0].Change)
+
+ equals(t, "/test/origin/linked", inodeChanges[1].Path)
+ equals(t, zfs.File, inodeChanges[1].Type)
+ equals(t, zfs.Modified, inodeChanges[1].Change)
+ equals(t, 1, inodeChanges[1].ReferenceCountChange)
+
+ equals(t, "/test/origin/file", inodeChanges[2].Path)
+ equals(t, "/test/origin/file-new", inodeChanges[2].NewPath)
+ equals(t, zfs.File, inodeChanges[2].Type)
+ equals(t, zfs.Renamed, inodeChanges[2].Change)
+
+ equals(t, "/test/origin/i ❤ unicode", inodeChanges[3].Path)
+ equals(t, zfs.File, inodeChanges[3].Type)
+ equals(t, zfs.Created, inodeChanges[3].Change)
+
+ ok(t, movedFile.Close())
+ ok(t, unicodeFile.Close())
+ ok(t, linkedFile.Close())
+ ok(t, snapshot.Destroy(zfs.DestroyForceUmount))
+ ok(t, fs.Destroy(zfs.DestroyForceUmount))
+ })
+}
diff --git a/vendor/src/github.com/mistifyio/go-zfs/zpool.go b/vendor/src/github.com/mistifyio/go-zfs/zpool.go
new file mode 100644
index 0000000000..59be0a84ce
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/zpool.go
@@ -0,0 +1,108 @@
+package zfs
+
+// ZFS zpool states, which can indicate if a pool is online, offline,
+// degraded, etc. More information regarding zpool states can be found here:
+// https://docs.oracle.com/cd/E19253-01/819-5461/gamno/index.html.
+const (
+ ZpoolOnline = "ONLINE"
+ ZpoolDegraded = "DEGRADED"
+ ZpoolFaulted = "FAULTED"
+ ZpoolOffline = "OFFLINE"
+ ZpoolUnavail = "UNAVAIL"
+ ZpoolRemoved = "REMOVED"
+)
+
+// Zpool is a ZFS zpool. A pool is a top-level structure in ZFS, and can
+// contain many descendent datasets.
+type Zpool struct {
+ Name string
+ Health string
+ Allocated uint64
+ Size uint64
+ Free uint64
+}
+
+// zpool is a helper function to wrap typical calls to zpool.
+func zpool(arg ...string) ([][]string, error) {
+ c := command{Command: "zpool"}
+ return c.Run(arg...)
+}
+
+// GetZpool retrieves a single ZFS zpool by name.
+func GetZpool(name string) (*Zpool, error) {
+ out, err := zpool("get", "all", "-p", name)
+ if err != nil {
+ return nil, err
+ }
+
+ // there is no -H
+ out = out[1:]
+
+ z := &Zpool{Name: name}
+ for _, line := range out {
+ if err := z.parseLine(line); err != nil {
+ return nil, err
+ }
+ }
+
+ return z, nil
+}
+
+// Datasets returns a slice of all ZFS datasets in a zpool.
+func (z *Zpool) Datasets() ([]*Dataset, error) {
+ return Datasets(z.Name)
+}
+
+// Snapshots returns a slice of all ZFS snapshots in a zpool.
+func (z *Zpool) Snapshots() ([]*Dataset, error) {
+ return Snapshots(z.Name)
+}
+
+// CreateZpool creates a new ZFS zpool with the specified name, properties,
+// and optional arguments.
+// A full list of available ZFS properties and command-line arguments may be
+// found here: https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func CreateZpool(name string, properties map[string]string, args ...string) (*Zpool, error) {
+ cli := make([]string, 1, 4)
+ cli[0] = "create"
+ if properties != nil {
+ cli = append(cli, propsSlice(properties)...)
+ }
+ cli = append(cli, name)
+ cli = append(cli, args...)
+ _, err := zpool(cli...)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Zpool{Name: name}, nil
+}
+
+// Destroy destroys a ZFS zpool by name.
+func (z *Zpool) Destroy() error {
+ _, err := zpool("destroy", z.Name)
+ return err
+}
+
+// ListZpools list all ZFS zpools accessible on the current system.
+func ListZpools() ([]*Zpool, error) {
+ args := []string{"list", "-Ho", "name"}
+ out, err := zpool(args...)
+ if err != nil {
+ return nil, err
+ }
+
+	// -H (scripted mode) is passed above, so the output has no header row;
+	// every line is a pool name and nothing must be skipped.
+
+ var pools []*Zpool
+
+ for _, line := range out {
+ z, err := GetZpool(line[0])
+ if err != nil {
+ return nil, err
+ }
+ pools = append(pools, z)
+ }
+ return pools, nil
+}
diff --git a/vendor/src/github.com/syndtr/gocapability/capability/enum.go b/vendor/src/github.com/syndtr/gocapability/capability/enum.go
index bff756a5ce..fd0ce7fe8e 100644
--- a/vendor/src/github.com/syndtr/gocapability/capability/enum.go
+++ b/vendor/src/github.com/syndtr/gocapability/capability/enum.go
@@ -34,90 +34,9 @@ const (
BOUNDS = BOUNDING
)
+//go:generate go run enumgen/gen.go
type Cap int
-func (c Cap) String() string {
- switch c {
- case CAP_CHOWN:
- return "chown"
- case CAP_DAC_OVERRIDE:
- return "dac_override"
- case CAP_DAC_READ_SEARCH:
- return "dac_read_search"
- case CAP_FOWNER:
- return "fowner"
- case CAP_FSETID:
- return "fsetid"
- case CAP_KILL:
- return "kill"
- case CAP_SETGID:
- return "setgid"
- case CAP_SETUID:
- return "setuid"
- case CAP_SETPCAP:
- return "setpcap"
- case CAP_LINUX_IMMUTABLE:
- return "linux_immutable"
- case CAP_NET_BIND_SERVICE:
- return "net_bind_service"
- case CAP_NET_BROADCAST:
- return "net_broadcast"
- case CAP_NET_ADMIN:
- return "net_admin"
- case CAP_NET_RAW:
- return "net_raw"
- case CAP_IPC_LOCK:
- return "ipc_lock"
- case CAP_IPC_OWNER:
- return "ipc_owner"
- case CAP_SYS_MODULE:
- return "sys_module"
- case CAP_SYS_RAWIO:
- return "sys_rawio"
- case CAP_SYS_CHROOT:
- return "sys_chroot"
- case CAP_SYS_PTRACE:
- return "sys_ptrace"
- case CAP_SYS_PACCT:
- return "sys_psacct"
- case CAP_SYS_ADMIN:
- return "sys_admin"
- case CAP_SYS_BOOT:
- return "sys_boot"
- case CAP_SYS_NICE:
- return "sys_nice"
- case CAP_SYS_RESOURCE:
- return "sys_resource"
- case CAP_SYS_TIME:
- return "sys_time"
- case CAP_SYS_TTY_CONFIG:
- return "sys_tty_config"
- case CAP_MKNOD:
- return "mknod"
- case CAP_LEASE:
- return "lease"
- case CAP_AUDIT_WRITE:
- return "audit_write"
- case CAP_AUDIT_CONTROL:
- return "audit_control"
- case CAP_SETFCAP:
- return "setfcap"
- case CAP_MAC_OVERRIDE:
- return "mac_override"
- case CAP_MAC_ADMIN:
- return "mac_admin"
- case CAP_SYSLOG:
- return "syslog"
- case CAP_WAKE_ALARM:
- return "wake_alarm"
- case CAP_BLOCK_SUSPEND:
- return "block_suspend"
- case CAP_AUDIT_READ:
- return "audit_read"
- }
- return "unknown"
-}
-
// POSIX-draft defined capabilities.
const (
// In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this
diff --git a/vendor/src/github.com/syndtr/gocapability/capability/enum_gen.go b/vendor/src/github.com/syndtr/gocapability/capability/enum_gen.go
new file mode 100644
index 0000000000..b9e6d2d5e1
--- /dev/null
+++ b/vendor/src/github.com/syndtr/gocapability/capability/enum_gen.go
@@ -0,0 +1,129 @@
+// generated file; DO NOT EDIT - use go generate in directory with source
+
+package capability
+
+func (c Cap) String() string {
+ switch c {
+ case CAP_CHOWN:
+ return "chown"
+ case CAP_DAC_OVERRIDE:
+ return "dac_override"
+ case CAP_DAC_READ_SEARCH:
+ return "dac_read_search"
+ case CAP_FOWNER:
+ return "fowner"
+ case CAP_FSETID:
+ return "fsetid"
+ case CAP_KILL:
+ return "kill"
+ case CAP_SETGID:
+ return "setgid"
+ case CAP_SETUID:
+ return "setuid"
+ case CAP_SETPCAP:
+ return "setpcap"
+ case CAP_LINUX_IMMUTABLE:
+ return "linux_immutable"
+ case CAP_NET_BIND_SERVICE:
+ return "net_bind_service"
+ case CAP_NET_BROADCAST:
+ return "net_broadcast"
+ case CAP_NET_ADMIN:
+ return "net_admin"
+ case CAP_NET_RAW:
+ return "net_raw"
+ case CAP_IPC_LOCK:
+ return "ipc_lock"
+ case CAP_IPC_OWNER:
+ return "ipc_owner"
+ case CAP_SYS_MODULE:
+ return "sys_module"
+ case CAP_SYS_RAWIO:
+ return "sys_rawio"
+ case CAP_SYS_CHROOT:
+ return "sys_chroot"
+ case CAP_SYS_PTRACE:
+ return "sys_ptrace"
+ case CAP_SYS_PACCT:
+ return "sys_pacct"
+ case CAP_SYS_ADMIN:
+ return "sys_admin"
+ case CAP_SYS_BOOT:
+ return "sys_boot"
+ case CAP_SYS_NICE:
+ return "sys_nice"
+ case CAP_SYS_RESOURCE:
+ return "sys_resource"
+ case CAP_SYS_TIME:
+ return "sys_time"
+ case CAP_SYS_TTY_CONFIG:
+ return "sys_tty_config"
+ case CAP_MKNOD:
+ return "mknod"
+ case CAP_LEASE:
+ return "lease"
+ case CAP_AUDIT_WRITE:
+ return "audit_write"
+ case CAP_AUDIT_CONTROL:
+ return "audit_control"
+ case CAP_SETFCAP:
+ return "setfcap"
+ case CAP_MAC_OVERRIDE:
+ return "mac_override"
+ case CAP_MAC_ADMIN:
+ return "mac_admin"
+ case CAP_SYSLOG:
+ return "syslog"
+ case CAP_WAKE_ALARM:
+ return "wake_alarm"
+ case CAP_BLOCK_SUSPEND:
+ return "block_suspend"
+ case CAP_AUDIT_READ:
+ return "audit_read"
+ }
+ return "unknown"
+}
+
+// List returns list of all supported capabilities
+func List() []Cap {
+ return []Cap{
+ CAP_CHOWN,
+ CAP_DAC_OVERRIDE,
+ CAP_DAC_READ_SEARCH,
+ CAP_FOWNER,
+ CAP_FSETID,
+ CAP_KILL,
+ CAP_SETGID,
+ CAP_SETUID,
+ CAP_SETPCAP,
+ CAP_LINUX_IMMUTABLE,
+ CAP_NET_BIND_SERVICE,
+ CAP_NET_BROADCAST,
+ CAP_NET_ADMIN,
+ CAP_NET_RAW,
+ CAP_IPC_LOCK,
+ CAP_IPC_OWNER,
+ CAP_SYS_MODULE,
+ CAP_SYS_RAWIO,
+ CAP_SYS_CHROOT,
+ CAP_SYS_PTRACE,
+ CAP_SYS_PACCT,
+ CAP_SYS_ADMIN,
+ CAP_SYS_BOOT,
+ CAP_SYS_NICE,
+ CAP_SYS_RESOURCE,
+ CAP_SYS_TIME,
+ CAP_SYS_TTY_CONFIG,
+ CAP_MKNOD,
+ CAP_LEASE,
+ CAP_AUDIT_WRITE,
+ CAP_AUDIT_CONTROL,
+ CAP_SETFCAP,
+ CAP_MAC_OVERRIDE,
+ CAP_MAC_ADMIN,
+ CAP_SYSLOG,
+ CAP_WAKE_ALARM,
+ CAP_BLOCK_SUSPEND,
+ CAP_AUDIT_READ,
+ }
+}
diff --git a/vendor/src/github.com/syndtr/gocapability/capability/enumgen/gen.go b/vendor/src/github.com/syndtr/gocapability/capability/enumgen/gen.go
new file mode 100644
index 0000000000..4c733809b1
--- /dev/null
+++ b/vendor/src/github.com/syndtr/gocapability/capability/enumgen/gen.go
@@ -0,0 +1,92 @@
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+)
+
+const fileName = "enum.go"
+const genName = "enum_gen.go"
+
+type generator struct {
+ buf bytes.Buffer
+ caps []string
+}
+
+func (g *generator) writeHeader() {
+ g.buf.WriteString("// generated file; DO NOT EDIT - use go generate in directory with source\n")
+ g.buf.WriteString("\n")
+ g.buf.WriteString("package capability")
+}
+
+func (g *generator) writeStringFunc() {
+ g.buf.WriteString("\n")
+ g.buf.WriteString("func (c Cap) String() string {\n")
+ g.buf.WriteString("switch c {\n")
+ for _, cap := range g.caps {
+ fmt.Fprintf(&g.buf, "case %s:\n", cap)
+ fmt.Fprintf(&g.buf, "return \"%s\"\n", strings.ToLower(cap[4:]))
+ }
+ g.buf.WriteString("}\n")
+ g.buf.WriteString("return \"unknown\"\n")
+ g.buf.WriteString("}\n")
+}
+
+func (g *generator) writeListFunc() {
+ g.buf.WriteString("\n")
+ g.buf.WriteString("// List returns list of all supported capabilities\n")
+ g.buf.WriteString("func List() []Cap {\n")
+ g.buf.WriteString("return []Cap{\n")
+ for _, cap := range g.caps {
+ fmt.Fprintf(&g.buf, "%s,\n", cap)
+ }
+ g.buf.WriteString("}\n")
+ g.buf.WriteString("}\n")
+}
+
+func main() {
+ fs := token.NewFileSet()
+ parsedFile, err := parser.ParseFile(fs, fileName, nil, 0)
+ if err != nil {
+ log.Fatal(err)
+ }
+ var caps []string
+ for _, decl := range parsedFile.Decls {
+ decl, ok := decl.(*ast.GenDecl)
+ if !ok || decl.Tok != token.CONST {
+ continue
+ }
+ for _, spec := range decl.Specs {
+ vspec := spec.(*ast.ValueSpec)
+ name := vspec.Names[0].Name
+ if strings.HasPrefix(name, "CAP_") {
+ caps = append(caps, name)
+ }
+ }
+ }
+ g := &generator{caps: caps}
+ g.writeHeader()
+ g.writeStringFunc()
+ g.writeListFunc()
+ src, err := format.Source(g.buf.Bytes())
+ if err != nil {
+ fmt.Println("generated invalid Go code")
+ fmt.Println(g.buf.String())
+ log.Fatal(err)
+ }
+ fi, err := os.Stat(fileName)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := ioutil.WriteFile(genName, src, fi.Mode().Perm()); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/vendor/src/github.com/tchap/go-patricia/README.md b/vendor/src/github.com/tchap/go-patricia/README.md
index 11ee4612d3..9d6ebc43a5 100644
--- a/vendor/src/github.com/tchap/go-patricia/README.md
+++ b/vendor/src/github.com/tchap/go-patricia/README.md
@@ -50,9 +50,12 @@ printItem := func(prefix patricia.Prefix, item patricia.Item) error {
return nil
}
-// Create a new tree.
+// Create a new default trie (using the default parameter values).
trie := NewTrie()
+// Create a new custom trie.
+trie := NewTrie(MaxPrefixPerNode(16), MaxChildrenPerSparseNode(10))
+
// Insert some items.
trie.Insert(Prefix("Pepa Novak"), 1)
trie.Insert(Prefix("Pepa Sindelar"), 2)
@@ -67,12 +70,12 @@ key = Prefix("Karel")
fmt.Printf("Anybody called %q here? %v\n", key, trie.MatchSubtree(key))
// Anybody called "Karel" here? true
-// Walk the tree.
+// Walk the tree in alphabetical order.
trie.Visit(printItem)
+// "Karel Hynek Macha": 4
+// "Karel Macha": 3
// "Pepa Novak": 1
// "Pepa Sindelar": 2
-// "Karel Macha": 3
-// "Karel Hynek Macha": 4
// Walk a subtree.
trie.VisitSubtree(Prefix("Pepa"), printItem)
@@ -96,8 +99,8 @@ trie.Delete(Prefix("Karel Macha"))
// Walk again.
trie.Visit(printItem)
-// "Pepa Sindelar": 2
// "Karel Hynek Macha": 10
+// "Pepa Sindelar": 2
// Delete a subtree.
trie.DeleteSubtree(Prefix("Pepa"))
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/children.go b/vendor/src/github.com/tchap/go-patricia/patricia/children.go
index 07d3326335..a204b0c8a9 100644
--- a/vendor/src/github.com/tchap/go-patricia/patricia/children.go
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/children.go
@@ -5,11 +5,7 @@
package patricia
-// Max prefix length that is kept in a single trie node.
-var MaxPrefixPerNode = 10
-
-// Max children to keep in a node in the sparse mode.
-const MaxChildrenPerSparseNode = 8
+import "sort"
type childList interface {
length() int
@@ -21,13 +17,28 @@ type childList interface {
walk(prefix *Prefix, visitor VisitorFunc) error
}
-type sparseChildList struct {
- children []*Trie
+type tries []*Trie
+
+func (t tries) Len() int {
+ return len(t)
}
-func newSparseChildList() childList {
+func (t tries) Less(i, j int) bool {
+ strings := sort.StringSlice{string(t[i].prefix), string(t[j].prefix)}
+ return strings.Less(0, 1)
+}
+
+func (t tries) Swap(i, j int) {
+ t[i], t[j] = t[j], t[i]
+}
+
+type sparseChildList struct {
+ children tries
+}
+
+func newSparseChildList(maxChildrenPerSparseNode int) childList {
return &sparseChildList{
- children: make([]*Trie, 0, MaxChildrenPerSparseNode),
+ children: make(tries, 0, DefaultMaxChildrenPerSparseNode),
}
}
@@ -82,6 +93,9 @@ func (list *sparseChildList) next(b byte) *Trie {
}
func (list *sparseChildList) walk(prefix *Prefix, visitor VisitorFunc) error {
+
+ sort.Sort(list.children)
+
for _, child := range list.children {
*prefix = append(*prefix, child.prefix...)
if child.item != nil {
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/patricia.go b/vendor/src/github.com/tchap/go-patricia/patricia/patricia.go
index 8fcbcdf426..a8c3786b62 100644
--- a/vendor/src/github.com/tchap/go-patricia/patricia/patricia.go
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/patricia.go
@@ -13,6 +13,11 @@ import (
// Trie
//------------------------------------------------------------------------------
+const (
+ DefaultMaxPrefixPerNode = 10
+ DefaultMaxChildrenPerSparseNode = 8
+)
+
type (
Prefix []byte
Item interface{}
@@ -27,15 +32,44 @@ type Trie struct {
prefix Prefix
item Item
+ maxPrefixPerNode int
+ maxChildrenPerSparseNode int
+
children childList
}
// Public API ------------------------------------------------------------------
+type Option func(*Trie)
+
// Trie constructor.
-func NewTrie() *Trie {
- return &Trie{
- children: newSparseChildList(),
+func NewTrie(options ...Option) *Trie {
+ trie := &Trie{}
+
+ for _, opt := range options {
+ opt(trie)
+ }
+
+ if trie.maxPrefixPerNode <= 0 {
+ trie.maxPrefixPerNode = DefaultMaxPrefixPerNode
+ }
+ if trie.maxChildrenPerSparseNode <= 0 {
+ trie.maxChildrenPerSparseNode = DefaultMaxChildrenPerSparseNode
+ }
+
+ trie.children = newSparseChildList(trie.maxChildrenPerSparseNode)
+ return trie
+}
+
+func MaxPrefixPerNode(value int) Option {
+ return func(trie *Trie) {
+ trie.maxPrefixPerNode = value
+ }
+}
+
+func MaxChildrenPerSparseNode(value int) Option {
+ return func(trie *Trie) {
+ trie.maxChildrenPerSparseNode = value
}
}
@@ -85,7 +119,8 @@ func (trie *Trie) MatchSubtree(key Prefix) (matched bool) {
return
}
-// Visit calls visitor on every node containing a non-nil item.
+// Visit calls visitor on every node containing a non-nil item
+// in alphabetical order.
//
// If an error is returned from visitor, the function stops visiting the tree
// and returns that error, unless it is a special error - SkipSubtree. In that
@@ -233,7 +268,7 @@ func (trie *Trie) DeleteSubtree(prefix Prefix) (deleted bool) {
// If we are in the root of the trie, reset the trie.
if parent == nil {
root.prefix = nil
- root.children = newSparseChildList()
+		root.children = newSparseChildList(trie.maxChildrenPerSparseNode)
return true
}
@@ -257,12 +292,12 @@ func (trie *Trie) put(key Prefix, item Item, replace bool) (inserted bool) {
)
if node.prefix == nil {
- if len(key) <= MaxPrefixPerNode {
+ if len(key) <= trie.maxPrefixPerNode {
node.prefix = key
goto InsertItem
}
- node.prefix = key[:MaxPrefixPerNode]
- key = key[MaxPrefixPerNode:]
+ node.prefix = key[:trie.maxPrefixPerNode]
+ key = key[trie.maxPrefixPerNode:]
goto AppendChild
}
@@ -306,14 +341,14 @@ AppendChild:
// This loop starts with empty node.prefix that needs to be filled.
for len(key) != 0 {
child := NewTrie()
- if len(key) <= MaxPrefixPerNode {
+ if len(key) <= trie.maxPrefixPerNode {
child.prefix = key
node.children = node.children.add(child)
node = child
goto InsertItem
} else {
- child.prefix = key[:MaxPrefixPerNode]
- key = key[MaxPrefixPerNode:]
+ child.prefix = key[:trie.maxPrefixPerNode]
+ key = key[trie.maxPrefixPerNode:]
node.children = node.children.add(child)
node = child
}
@@ -344,7 +379,7 @@ func (trie *Trie) compact() *Trie {
}
// Make sure the combined prefixes fit into a single node.
- if len(trie.prefix)+len(child.prefix) > MaxPrefixPerNode {
+ if len(trie.prefix)+len(child.prefix) > trie.maxPrefixPerNode {
return trie
}
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go
index 346e9a66cb..96089fceb4 100644
--- a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go
@@ -55,7 +55,7 @@ func TestTrie_InsertDensePreceeding(t *testing.T) {
trie := NewTrie()
start := byte(70)
// create a dense node
- for i := byte(0); i <= MaxChildrenPerSparseNode; i++ {
+ for i := byte(0); i <= DefaultMaxChildrenPerSparseNode; i++ {
if !trie.Insert(Prefix([]byte{start + i}), true) {
t.Errorf("insert failed, prefix=%v", start+i)
}
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go
index 27f3c878b5..b35c9e2ef5 100644
--- a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go
@@ -300,10 +300,10 @@ func TestTrie_VisitReturnError(t *testing.T) {
someErr := errors.New("Something exploded")
if err := trie.Visit(func(prefix Prefix, item Item) error {
t.Logf("VISITING prefix=%q, item=%v", prefix, item)
- if item.(int) == 0 {
+ if item.(int) == 3 {
return someErr
}
- if item.(int) != 0 {
+ if item.(int) != 3 {
t.Errorf("Unexpected prefix encountered, %q", prefix)
}
return nil
@@ -598,10 +598,10 @@ func ExampleTrie() {
// Walk the tree.
trie.Visit(printItem)
+ // "Karel Hynek Macha": 4
+ // "Karel Macha": 3
// "Pepa Novak": 1
// "Pepa Sindelar": 2
- // "Karel Macha": 3
- // "Karel Hynek Macha": 4
// Walk a subtree.
trie.VisitSubtree(Prefix("Pepa"), printItem)
@@ -625,8 +625,8 @@ func ExampleTrie() {
// Walk again.
trie.Visit(printItem)
- // "Pepa Sindelar": 2
// "Karel Hynek Macha": 10
+ // "Pepa Sindelar": 2
// Delete a subtree.
trie.DeleteSubtree(Prefix("Pepa"))
@@ -638,16 +638,16 @@ func ExampleTrie() {
// Output:
// "Pepa Novak" present? true
// Anybody called "Karel" here? true
- // "Pepa Novak": 1
- // "Pepa Sindelar": 2
- // "Karel Macha": 3
// "Karel Hynek Macha": 4
+ // "Karel Macha": 3
+ // "Pepa Novak": 1
+ // "Pepa Sindelar": 2
// "Pepa Novak": 1
// "Pepa Sindelar": 2
// "Karel Hynek Macha": 10
// "Karel Hynek Macha": 10
- // "Pepa Sindelar": 2
// "Karel Hynek Macha": 10
+ // "Pepa Sindelar": 2
// "Karel Hynek Macha": 10
}
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_test.go b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_test.go
index ce5ae378fa..12c441b621 100644
--- a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_test.go
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_test.go
@@ -13,6 +13,20 @@ import (
// Tests -----------------------------------------------------------------------
+func TestTrie_ConstructorOptions(t *testing.T) {
+ trie := NewTrie(MaxPrefixPerNode(16), MaxChildrenPerSparseNode(10))
+
+ if trie.maxPrefixPerNode != 16 {
+ t.Errorf("Unexpected trie.maxPrefixPerNode value, expected=%v, got=%v",
+ 16, trie.maxPrefixPerNode)
+ }
+
+ if trie.maxChildrenPerSparseNode != 10 {
+ t.Errorf("Unexpected trie.maxChildrenPerSparseNode value, expected=%v, got=%v",
+ 10, trie.maxChildrenPerSparseNode)
+ }
+}
+
func TestTrie_GetNonexistentPrefix(t *testing.T) {
trie := NewTrie()
diff --git a/vendor/src/github.com/vishvananda/netlink/.travis.yml b/vendor/src/github.com/vishvananda/netlink/.travis.yml
new file mode 100644
index 0000000000..1970069d51
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/.travis.yml
@@ -0,0 +1,3 @@
+language: go
+install:
+ - go get github.com/vishvananda/netns
diff --git a/vendor/src/github.com/vishvananda/netlink/LICENSE b/vendor/src/github.com/vishvananda/netlink/LICENSE
new file mode 100644
index 0000000000..9f64db8582
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/LICENSE
@@ -0,0 +1,192 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Vishvananda Ishaya.
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/github.com/vishvananda/netlink/Makefile b/vendor/src/github.com/vishvananda/netlink/Makefile
new file mode 100644
index 0000000000..b3250185f4
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/Makefile
@@ -0,0 +1,29 @@
+DIRS := \
+ . \
+ nl
+
+DEPS = \
+ github.com/vishvananda/netns
+
+uniq = $(if $1,$(firstword $1) $(call uniq,$(filter-out $(firstword $1),$1)))
+testdirs = $(call uniq,$(foreach d,$(1),$(dir $(wildcard $(d)/*_test.go))))
+goroot = $(addprefix ../../../,$(1))
+unroot = $(subst ../../../,,$(1))
+fmt = $(addprefix fmt-,$(1))
+
+all: fmt
+
+$(call goroot,$(DEPS)):
+ go get $(call unroot,$@)
+
+.PHONY: $(call testdirs,$(DIRS))
+$(call testdirs,$(DIRS)):
+ sudo -E go test -v github.com/vishvananda/netlink/$@
+
+$(call fmt,$(call testdirs,$(DIRS))):
+ ! gofmt -l $(subst fmt-,,$@)/*.go | grep ''
+
+.PHONY: fmt
+fmt: $(call fmt,$(call testdirs,$(DIRS)))
+
+test: fmt $(call goroot,$(DEPS)) $(call testdirs,$(DIRS))
diff --git a/vendor/src/github.com/vishvananda/netlink/README.md b/vendor/src/github.com/vishvananda/netlink/README.md
new file mode 100644
index 0000000000..555f886523
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/README.md
@@ -0,0 +1,83 @@
+# netlink - netlink library for go #
+
+[](https://travis-ci.org/vishvananda/netlink) [](https://godoc.org/github.com/vishvananda/netlink)
+
+The netlink package provides a simple netlink library for go. Netlink
+is the interface a user-space program in linux uses to communicate with
+the kernel. It can be used to add and remove interfaces, set ip addresses
+and routes, and configure ipsec. Netlink communication requires elevated
+privileges, so in most cases this code needs to be run as root. Since
+low-level netlink messages are inscrutable at best, the library attempts
+to provide an api that is loosely modeled on the CLI provided by iproute2.
+Actions like `ip link add` will be accomplished via a similarly named
+function like AddLink(). This library began its life as a fork of the
+netlink functionality in
+[docker/libcontainer](https://github.com/docker/libcontainer) but was
+heavily rewritten to improve testability, performance, and to add new
+functionality like ipsec xfrm handling.
+
+## Local Build and Test ##
+
+You can use go get command:
+
+ go get github.com/vishvananda/netlink
+
+Testing dependencies:
+
+ go get github.com/vishvananda/netns
+
+Testing (requires root):
+
+ sudo -E go test github.com/vishvananda/netlink
+
+## Examples ##
+
+Add a new bridge and add eth1 into it:
+
+```go
+package main
+
+import (
+ "net"
+ "github.com/vishvananda/netlink"
+)
+
+func main() {
+ mybridge := &netlink.Bridge{netlink.LinkAttrs{Name: "foo"}}
+	_ = netlink.LinkAdd(mybridge)
+ eth1, _ := netlink.LinkByName("eth1")
+ netlink.LinkSetMaster(eth1, mybridge)
+}
+
+```
+
+Add a new ip address to loopback:
+
+```go
+package main
+
+import (
+ "net"
+ "github.com/vishvananda/netlink"
+)
+
+func main() {
+ lo, _ := netlink.LinkByName("lo")
+ addr, _ := netlink.ParseAddr("169.254.169.254/32")
+ netlink.AddrAdd(lo, addr)
+}
+
+```
+
+## Future Work ##
+
+Many pieces of netlink are not yet fully supported in the high-level
+interface. Aspects of virtually all of the high-level objects don't exist.
+Many of the underlying primitives are there, so it's a matter of putting
+the right fields into the high-level objects and making sure that they
+are serialized and deserialized correctly in the Add and List methods.
+
+There are also a few pieces of low level netlink functionality that still
+need to be implemented. Routing rules are not in place and some of the
+more advanced link types. Hopefully there is decent structure and testing
+in place to make these fairly straightforward to add.
diff --git a/vendor/src/github.com/vishvananda/netlink/addr.go b/vendor/src/github.com/vishvananda/netlink/addr.go
new file mode 100644
index 0000000000..5c12f4e998
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/addr.go
@@ -0,0 +1,43 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// Addr represents an IP address from netlink. Netlink ip addresses
+// include a mask, so it stores the address as a net.IPNet.
+type Addr struct {
+ *net.IPNet
+ Label string
+}
+
+// String returns $ip/$netmask $label
+func (addr Addr) String() string {
+ return fmt.Sprintf("%s %s", addr.IPNet, addr.Label)
+}
+
+// ParseAddr parses the string representation of an address in the
+// form $ip/$netmask $label. The label portion is optional
+func ParseAddr(s string) (*Addr, error) {
+ label := ""
+ parts := strings.Split(s, " ")
+ if len(parts) > 1 {
+ s = parts[0]
+ label = parts[1]
+ }
+ m, err := ParseIPNet(s)
+ if err != nil {
+ return nil, err
+ }
+ return &Addr{IPNet: m, Label: label}, nil
+}
+
+// Equal returns true if both Addrs have the same net.IPNet value.
+func (a Addr) Equal(x Addr) bool {
+ sizea, _ := a.Mask.Size()
+ sizeb, _ := x.Mask.Size()
+ // ignore label for comparison
+ return a.IP.Equal(x.IP) && sizea == sizeb
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/addr_linux.go b/vendor/src/github.com/vishvananda/netlink/addr_linux.go
new file mode 100644
index 0000000000..dd26f4aec7
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/addr_linux.go
@@ -0,0 +1,114 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "strings"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// AddrAdd will add an IP address to a link device.
+// Equivalent to: `ip addr add $addr dev $link`
+func AddrAdd(link Link, addr *Addr) error {
+
+ req := nl.NewNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ return addrHandle(link, addr, req)
+}
+
+// AddrDel will delete an IP address from a link device.
+// Equivalent to: `ip addr del $addr dev $link`
+func AddrDel(link Link, addr *Addr) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_DELADDR, syscall.NLM_F_ACK)
+ return addrHandle(link, addr, req)
+}
+
+func addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error {
+ base := link.Attrs()
+ if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) {
+ return fmt.Errorf("label must begin with interface name")
+ }
+ ensureIndex(base)
+
+ family := nl.GetIPFamily(addr.IP)
+
+ msg := nl.NewIfAddrmsg(family)
+ msg.Index = uint32(base.Index)
+ prefixlen, _ := addr.Mask.Size()
+ msg.Prefixlen = uint8(prefixlen)
+ req.AddData(msg)
+
+ var addrData []byte
+ if family == FAMILY_V4 {
+ addrData = addr.IP.To4()
+ } else {
+ addrData = addr.IP.To16()
+ }
+
+ localData := nl.NewRtAttr(syscall.IFA_LOCAL, addrData)
+ req.AddData(localData)
+
+ addressData := nl.NewRtAttr(syscall.IFA_ADDRESS, addrData)
+ req.AddData(addressData)
+
+ if addr.Label != "" {
+ labelData := nl.NewRtAttr(syscall.IFA_LABEL, nl.ZeroTerminated(addr.Label))
+ req.AddData(labelData)
+ }
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// AddrList gets a list of IP addresses in the system.
+// Equivalent to: `ip addr show`.
+// The list can be filtered by link and ip family.
+func AddrList(link Link, family int) ([]Addr, error) {
+ req := nl.NewNetlinkRequest(syscall.RTM_GETADDR, syscall.NLM_F_DUMP)
+ msg := nl.NewIfInfomsg(family)
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWADDR)
+ if err != nil {
+ return nil, err
+ }
+
+ index := 0
+ if link != nil {
+ base := link.Attrs()
+ ensureIndex(base)
+ index = base.Index
+ }
+
+ res := make([]Addr, 0)
+ for _, m := range msgs {
+ msg := nl.DeserializeIfAddrmsg(m)
+
+ if link != nil && msg.Index != uint32(index) {
+ // Ignore messages from other interfaces
+ continue
+ }
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ var addr Addr
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case syscall.IFA_ADDRESS:
+ addr.IPNet = &net.IPNet{
+ IP: attr.Value,
+ Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
+ }
+ case syscall.IFA_LABEL:
+ addr.Label = string(attr.Value[:len(attr.Value)-1])
+ }
+ }
+ res = append(res, addr)
+ }
+
+ return res, nil
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/addr_test.go b/vendor/src/github.com/vishvananda/netlink/addr_test.go
new file mode 100644
index 0000000000..45e22c0526
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/addr_test.go
@@ -0,0 +1,45 @@
+package netlink
+
+import (
+ "testing"
+)
+
+func TestAddrAddDel(t *testing.T) {
+ tearDown := setUpNetlinkTest(t)
+ defer tearDown()
+
+ link, err := LinkByName("lo")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ addr, err := ParseAddr("127.1.1.1/24 local")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err = AddrAdd(link, addr); err != nil {
+ t.Fatal(err)
+ }
+
+ addrs, err := AddrList(link, FAMILY_ALL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(addrs) != 1 || !addr.Equal(addrs[0]) || addrs[0].Label != addr.Label {
+ t.Fatal("Address not added properly")
+ }
+
+ if err = AddrDel(link, addr); err != nil {
+ t.Fatal(err)
+ }
+ addrs, err = AddrList(link, FAMILY_ALL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(addrs) != 0 {
+ t.Fatal("Address not removed properly")
+ }
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/link.go b/vendor/src/github.com/vishvananda/netlink/link.go
new file mode 100644
index 0000000000..276c2f80b0
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/link.go
@@ -0,0 +1,175 @@
+package netlink
+
+import "net"
+
+// Link represents a link device from netlink. Shared link attributes
+// like name may be retrieved using the Attrs() method. Unique data
+// can be retrieved by casting the object to the proper type.
+type Link interface {
+ Attrs() *LinkAttrs
+ Type() string
+}
+
+// LinkAttrs represents data shared by most link types
+type LinkAttrs struct {
+ Index int
+ MTU int
+ TxQLen uint32 // Transmit Queue Length
+ Name string
+ HardwareAddr net.HardwareAddr
+ Flags net.Flags
+ ParentIndex int // index of the parent link device
+ MasterIndex int // must be the index of a bridge
+}
+
+// Device links cannot be created via netlink. These links
+// are links created by udev like 'lo' and 'eth0'
+type Device struct {
+ LinkAttrs
+}
+
+func (device *Device) Attrs() *LinkAttrs {
+ return &device.LinkAttrs
+}
+
+func (device *Device) Type() string {
+ return "device"
+}
+
+// Dummy links are dummy ethernet devices
+type Dummy struct {
+ LinkAttrs
+}
+
+func (dummy *Dummy) Attrs() *LinkAttrs {
+ return &dummy.LinkAttrs
+}
+
+func (dummy *Dummy) Type() string {
+ return "dummy"
+}
+
+// Bridge links are simple linux bridges
+type Bridge struct {
+ LinkAttrs
+}
+
+func (bridge *Bridge) Attrs() *LinkAttrs {
+ return &bridge.LinkAttrs
+}
+
+func (bridge *Bridge) Type() string {
+ return "bridge"
+}
+
+// Vlan links have ParentIndex set in their Attrs()
+type Vlan struct {
+ LinkAttrs
+ VlanId int
+}
+
+func (vlan *Vlan) Attrs() *LinkAttrs {
+ return &vlan.LinkAttrs
+}
+
+func (vlan *Vlan) Type() string {
+ return "vlan"
+}
+
+// Macvlan links have ParentIndex set in their Attrs()
+type Macvlan struct {
+ LinkAttrs
+}
+
+func (macvlan *Macvlan) Attrs() *LinkAttrs {
+ return &macvlan.LinkAttrs
+}
+
+func (macvlan *Macvlan) Type() string {
+ return "macvlan"
+}
+
+// Veth devices must specify PeerName on create
+type Veth struct {
+ LinkAttrs
+ PeerName string // veth on create only
+}
+
+func (veth *Veth) Attrs() *LinkAttrs {
+ return &veth.LinkAttrs
+}
+
+func (veth *Veth) Type() string {
+ return "veth"
+}
+
+// Generic links represent types that are not currently understood
+// by this netlink library.
+type Generic struct {
+ LinkAttrs
+ LinkType string
+}
+
+func (generic *Generic) Attrs() *LinkAttrs {
+ return &generic.LinkAttrs
+}
+
+func (generic *Generic) Type() string {
+ return generic.LinkType
+}
+
+type Vxlan struct {
+ LinkAttrs
+ VxlanId int
+ VtepDevIndex int
+ SrcAddr net.IP
+ Group net.IP
+ TTL int
+ TOS int
+ Learning bool
+ Proxy bool
+ RSC bool
+ L2miss bool
+ L3miss bool
+ NoAge bool
+ Age int
+ Limit int
+ Port int
+ PortLow int
+ PortHigh int
+}
+
+func (vxlan *Vxlan) Attrs() *LinkAttrs {
+ return &vxlan.LinkAttrs
+}
+
+func (vxlan *Vxlan) Type() string {
+ return "vxlan"
+}
+
+type IPVlanMode uint16
+
+const (
+ IPVLAN_MODE_L2 IPVlanMode = iota
+ IPVLAN_MODE_L3
+ IPVLAN_MODE_MAX
+)
+
+type IPVlan struct {
+ LinkAttrs
+ Mode IPVlanMode
+}
+
+func (ipvlan *IPVlan) Attrs() *LinkAttrs {
+ return &ipvlan.LinkAttrs
+}
+
+func (ipvlan *IPVlan) Type() string {
+ return "ipvlan"
+}
+
+// iproute2 supported devices;
+// vlan | veth | vcan | dummy | ifb | macvlan | macvtap |
+// bridge | bond | ipoib | ip6tnl | ipip | sit | vxlan |
+// gre | gretap | ip6gre | ip6gretap | vti | nlmon |
+// bond_slave | ipvlan
diff --git a/vendor/src/github.com/vishvananda/netlink/link_linux.go b/vendor/src/github.com/vishvananda/netlink/link_linux.go
new file mode 100644
index 0000000000..aedea165d9
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/link_linux.go
@@ -0,0 +1,696 @@
+package netlink
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "net"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+var native = nl.NativeEndian()
+var lookupByDump = false
+
+// ensureIndex fills in link.Index by looking the link up by name when
+// the caller supplied attrs without an index. Lookup errors are
+// deliberately ignored; on failure the index is simply left at 0.
+func ensureIndex(link *LinkAttrs) {
+	if link != nil && link.Index == 0 {
+		newlink, _ := LinkByName(link.Name)
+		if newlink != nil {
+			link.Index = newlink.Attrs().Index
+		}
+	}
+}
+
+// LinkSetUp enables the link device.
+// Equivalent to: `ip link set $link up`
+func LinkSetUp(link Link) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	// Change is the mask of flag bits to modify; set IFF_UP in both
+	// the mask and the new flag value to bring the device up.
+	msg.Change = syscall.IFF_UP
+	msg.Flags = syscall.IFF_UP
+	msg.Index = int32(base.Index)
+	req.AddData(msg)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetDown disables the link device.
+// Equivalent to: `ip link set $link down`
+func LinkSetDown(link Link) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	// Change masks IFF_UP; Flags leaves that bit clear to take the
+	// device down (the expression evaluates to 0).
+	msg.Change = syscall.IFF_UP
+	msg.Flags = 0 & ^syscall.IFF_UP
+	msg.Index = int32(base.Index)
+	req.AddData(msg)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetMTU sets the mtu of the link device.
+// Equivalent to: `ip link set $link mtu $mtu`
+func LinkSetMTU(link Link, mtu int) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	// NOTE(review): these assign ifinfomsg.Type/Flags (ifi_type /
+	// ifi_flags), not the netlink header fields, which were already
+	// set by NewNetlinkRequest — confirm this is intended.
+	msg.Type = syscall.RTM_SETLINK
+	msg.Flags = syscall.NLM_F_REQUEST
+	msg.Index = int32(base.Index)
+	msg.Change = nl.DEFAULT_CHANGE
+	req.AddData(msg)
+
+	// IFLA_MTU carries the MTU as a native-endian uint32.
+	b := make([]byte, 4)
+	native.PutUint32(b, uint32(mtu))
+
+	data := nl.NewRtAttr(syscall.IFLA_MTU, b)
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetName sets the name of the link device.
+// Equivalent to: `ip link set $link name $name`
+func LinkSetName(link Link, name string) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Type = syscall.RTM_SETLINK
+	msg.Flags = syscall.NLM_F_REQUEST
+	msg.Index = int32(base.Index)
+	msg.Change = nl.DEFAULT_CHANGE
+	req.AddData(msg)
+
+	// The new name travels in an IFLA_IFNAME attribute.
+	data := nl.NewRtAttr(syscall.IFLA_IFNAME, []byte(name))
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetHardwareAddr sets the hardware address of the link device.
+// Equivalent to: `ip link set $link address $hwaddr`
+func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Type = syscall.RTM_SETLINK
+	msg.Flags = syscall.NLM_F_REQUEST
+	msg.Index = int32(base.Index)
+	msg.Change = nl.DEFAULT_CHANGE
+	req.AddData(msg)
+
+	// The raw MAC bytes travel in an IFLA_ADDRESS attribute.
+	data := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(hwaddr))
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetMaster sets the master of the link device.
+// Equivalent to: `ip link set $link master $master`
+func LinkSetMaster(link Link, master *Bridge) error {
+ index := 0
+ if master != nil {
+ masterBase := master.Attrs()
+ ensureIndex(masterBase)
+ index = masterBase.Index
+ }
+ return LinkSetMasterByIndex(link, index)
+}
+
+// LinkSetMasterByIndex sets the master of the link device.
+// A masterIndex of 0 detaches the link from its master.
+// Equivalent to: `ip link set $link master $master`
+func LinkSetMasterByIndex(link Link, masterIndex int) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Type = syscall.RTM_SETLINK
+	msg.Flags = syscall.NLM_F_REQUEST
+	msg.Index = int32(base.Index)
+	msg.Change = nl.DEFAULT_CHANGE
+	req.AddData(msg)
+
+	// IFLA_MASTER carries the master's ifindex as a native uint32.
+	b := make([]byte, 4)
+	native.PutUint32(b, uint32(masterIndex))
+
+	data := nl.NewRtAttr(syscall.IFLA_MASTER, b)
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetNsPid puts the device into a new network namespace. The
+// pid must be a pid of a running process.
+// Equivalent to: `ip link set $link netns $pid`
+func LinkSetNsPid(link Link, nspid int) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Type = syscall.RTM_SETLINK
+	msg.Flags = syscall.NLM_F_REQUEST
+	msg.Index = int32(base.Index)
+	msg.Change = nl.DEFAULT_CHANGE
+	req.AddData(msg)
+
+	// IFLA_NET_NS_PID identifies the target namespace by process id.
+	b := make([]byte, 4)
+	native.PutUint32(b, uint32(nspid))
+
+	data := nl.NewRtAttr(syscall.IFLA_NET_NS_PID, b)
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetNsFd puts the device into a new network namespace. The
+// fd must be an open file descriptor to a network namespace.
+// Similar to: `ip link set $link netns $ns`
+func LinkSetNsFd(link Link, fd int) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Type = syscall.RTM_SETLINK
+	msg.Flags = syscall.NLM_F_REQUEST
+	msg.Index = int32(base.Index)
+	msg.Change = nl.DEFAULT_CHANGE
+	req.AddData(msg)
+
+	// IFLA_NET_NS_FD identifies the target namespace by descriptor.
+	b := make([]byte, 4)
+	native.PutUint32(b, uint32(fd))
+
+	data := nl.NewRtAttr(nl.IFLA_NET_NS_FD, b)
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+func boolAttr(val bool) []byte {
+ var v uint8
+ if val {
+ v = 1
+ }
+ return nl.Uint8Attr(v)
+}
+
+// vxlanPortRange mirrors the kernel's IFLA_VXLAN_PORT_RANGE payload:
+// two big-endian uint16 port bounds.
+type vxlanPortRange struct {
+	Lo, Hi uint16
+}
+
+// addVxlanAttrs nests the IFLA_VXLAN_* attributes for the given Vxlan
+// under an IFLA_INFO_DATA child of linkInfo, for use by LinkAdd.
+func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) {
+	data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_ID, nl.Uint32Attr(uint32(vxlan.VxlanId)))
+	if vxlan.VtepDevIndex != 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LINK, nl.Uint32Attr(uint32(vxlan.VtepDevIndex)))
+	}
+	// Pick the 4-byte or 16-byte attribute depending on whether the
+	// address is representable as IPv4.
+	if vxlan.SrcAddr != nil {
+		ip := vxlan.SrcAddr.To4()
+		if ip != nil {
+			nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LOCAL, []byte(ip))
+		} else {
+			ip = vxlan.SrcAddr.To16()
+			if ip != nil {
+				nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LOCAL6, []byte(ip))
+			}
+		}
+	}
+	if vxlan.Group != nil {
+		group := vxlan.Group.To4()
+		if group != nil {
+			nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GROUP, []byte(group))
+		} else {
+			group = vxlan.Group.To16()
+			if group != nil {
+				nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GROUP6, []byte(group))
+			}
+		}
+	}
+
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_TTL, nl.Uint8Attr(uint8(vxlan.TTL)))
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_TOS, nl.Uint8Attr(uint8(vxlan.TOS)))
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LEARNING, boolAttr(vxlan.Learning))
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PROXY, boolAttr(vxlan.Proxy))
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_RSC, boolAttr(vxlan.RSC))
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss))
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss))
+
+	// NoAge (ageing 0) takes precedence over an explicit Age.
+	if vxlan.NoAge {
+		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0))
+	} else if vxlan.Age > 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(uint32(vxlan.Age)))
+	}
+	if vxlan.Limit > 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LIMIT, nl.Uint32Attr(uint32(vxlan.Limit)))
+	}
+	if vxlan.Port > 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT, nl.Uint16Attr(uint16(vxlan.Port)))
+	}
+	// The port range is serialized big-endian, matching the kernel.
+	if vxlan.PortLow > 0 || vxlan.PortHigh > 0 {
+		pr := vxlanPortRange{uint16(vxlan.PortLow), uint16(vxlan.PortHigh)}
+
+		buf := new(bytes.Buffer)
+		binary.Write(buf, binary.BigEndian, &pr)
+
+		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT_RANGE, buf.Bytes())
+	}
+}
+
+// LinkAdd adds a new link device. The type and features of the device
+// are taken from the parameters in the link object.
+// Equivalent to: `ip link add $link`
+func LinkAdd(link Link) error {
+	// TODO: set mtu and hardware address
+	// TODO: support extra data for macvlan
+	base := link.Attrs()
+
+	if base.Name == "" {
+		return fmt.Errorf("LinkAttrs.Name cannot be empty!")
+	}
+
+	req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	req.AddData(msg)
+
+	// IFLA_LINK carries the parent ifindex; ipvlan cannot exist
+	// without one.
+	if base.ParentIndex != 0 {
+		b := make([]byte, 4)
+		native.PutUint32(b, uint32(base.ParentIndex))
+		data := nl.NewRtAttr(syscall.IFLA_LINK, b)
+		req.AddData(data)
+	} else if link.Type() == "ipvlan" {
+		return fmt.Errorf("Can't create ipvlan link without ParentIndex")
+	}
+
+	nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(base.Name))
+	req.AddData(nameData)
+
+	if base.MTU > 0 {
+		mtu := nl.NewRtAttr(syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU)))
+		req.AddData(mtu)
+	}
+
+	// IFLA_LINKINFO nests the link kind plus kind-specific data.
+	linkInfo := nl.NewRtAttr(syscall.IFLA_LINKINFO, nil)
+	nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type()))
+
+	nl.NewRtAttrChild(linkInfo, syscall.IFLA_TXQLEN, nl.Uint32Attr(base.TxQLen))
+
+	if vlan, ok := link.(*Vlan); ok {
+		b := make([]byte, 2)
+		native.PutUint16(b, uint16(vlan.VlanId))
+		data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+		nl.NewRtAttrChild(data, nl.IFLA_VLAN_ID, b)
+	} else if veth, ok := link.(*Veth); ok {
+		// The peer device is described inline via VETH_INFO_PEER,
+		// mirroring the name/txqlen/mtu of the primary end.
+		data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+		peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil)
+		nl.NewIfInfomsgChild(peer, syscall.AF_UNSPEC)
+		nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, nl.ZeroTerminated(veth.PeerName))
+		nl.NewRtAttrChild(peer, syscall.IFLA_TXQLEN, nl.Uint32Attr(base.TxQLen))
+		if base.MTU > 0 {
+			nl.NewRtAttrChild(peer, syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU)))
+		}
+	} else if vxlan, ok := link.(*Vxlan); ok {
+		addVxlanAttrs(vxlan, linkInfo)
+	} else if ipv, ok := link.(*IPVlan); ok {
+		data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+		nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(ipv.Mode)))
+	}
+
+	req.AddData(linkInfo)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	if err != nil {
+		return err
+	}
+
+	// Fill in the index assigned by the kernel.
+	ensureIndex(base)
+
+	// can't set master during create, so set it afterwards
+	if base.MasterIndex != 0 {
+		// TODO: verify MasterIndex is actually a bridge?
+		return LinkSetMasterByIndex(link, base.MasterIndex)
+	}
+	return nil
+}
+
+// LinkDel deletes link device. Either Index or Name must be set in
+// the link object for it to be deleted. The other values are ignored.
+// Equivalent to: `ip link del $link`
+func LinkDel(link Link) error {
+	base := link.Attrs()
+
+	// Resolve the index from the name if only the name was given.
+	ensureIndex(base)
+
+	req := nl.NewNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Index = int32(base.Index)
+	req.AddData(msg)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// linkByNameDump finds a link by name by dumping the complete link
+// list; used as a fallback on kernels that cannot look up a single
+// link via IFLA_IFNAME.
+func linkByNameDump(name string) (Link, error) {
+	links, err := LinkList()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, candidate := range links {
+		if candidate.Attrs().Name != name {
+			continue
+		}
+		return candidate, nil
+	}
+	return nil, fmt.Errorf("Link %s not found", name)
+}
+
+// LinkByName finds a link by name and returns a pointer to the object.
+func LinkByName(name string) (Link, error) {
+	// Once a kernel has rejected an IFLA_IFNAME lookup we remember it
+	// and go straight to the dump-based fallback.
+	if lookupByDump {
+		return linkByNameDump(name)
+	}
+
+	req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	req.AddData(msg)
+
+	nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(name))
+	req.AddData(nameData)
+
+	link, err := execGetLink(req)
+	if err == syscall.EINVAL {
+		// older kernels don't support looking up via IFLA_IFNAME
+		// so fall back to dumping all links
+		lookupByDump = true
+		return linkByNameDump(name)
+	}
+
+	return link, err
+}
+
+// LinkByIndex finds a link by index and returns a pointer to the object.
+func LinkByIndex(index int) (Link, error) {
+	req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Index = int32(index)
+	req.AddData(msg)
+
+	return execGetLink(req)
+}
+
+// execGetLink sends a prepared RTM_GETLINK request and deserializes
+// the single expected reply, mapping ENODEV and empty replies to a
+// "Link not found" error.
+func execGetLink(req *nl.NetlinkRequest) (Link, error) {
+	msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	if err != nil {
+		if errno, ok := err.(syscall.Errno); ok {
+			if errno == syscall.ENODEV {
+				return nil, fmt.Errorf("Link not found")
+			}
+		}
+		return nil, err
+	}
+
+	switch {
+	case len(msgs) == 0:
+		return nil, fmt.Errorf("Link not found")
+
+	case len(msgs) == 1:
+		return linkDeserialize(msgs[0])
+
+	default:
+		return nil, fmt.Errorf("More than one link found")
+	}
+}
+
+// linkDeserialize deserializes a raw message received from netlink into
+// a link object. The concrete Go type is chosen from IFLA_INFO_KIND;
+// unknown kinds become Generic and kind-less links become Device.
+func linkDeserialize(m []byte) (Link, error) {
+	msg := nl.DeserializeIfInfomsg(m)
+
+	attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+	if err != nil {
+		return nil, err
+	}
+
+	base := LinkAttrs{Index: int(msg.Index), Flags: linkFlags(msg.Flags)}
+	var link Link
+	linkType := ""
+	for _, attr := range attrs {
+		switch attr.Attr.Type {
+		case syscall.IFLA_LINKINFO:
+			infos, err := nl.ParseRouteAttr(attr.Value)
+			if err != nil {
+				return nil, err
+			}
+			for _, info := range infos {
+				switch info.Attr.Type {
+				case nl.IFLA_INFO_KIND:
+					// The kind string is NUL-terminated; strip the
+					// trailing byte before comparing.
+					linkType = string(info.Value[:len(info.Value)-1])
+					switch linkType {
+					case "dummy":
+						link = &Dummy{}
+					case "bridge":
+						link = &Bridge{}
+					case "vlan":
+						link = &Vlan{}
+					case "veth":
+						link = &Veth{}
+					case "vxlan":
+						link = &Vxlan{}
+					case "ipvlan":
+						link = &IPVlan{}
+					default:
+						link = &Generic{LinkType: linkType}
+					}
+				case nl.IFLA_INFO_DATA:
+					// NOTE: relies on IFLA_INFO_KIND arriving before
+					// IFLA_INFO_DATA so linkType/link are populated.
+					data, err := nl.ParseRouteAttr(info.Value)
+					if err != nil {
+						return nil, err
+					}
+					switch linkType {
+					case "vlan":
+						parseVlanData(link, data)
+					case "vxlan":
+						parseVxlanData(link, data)
+					case "ipvlan":
+						parseIPVlanData(link, data)
+					}
+				}
+			}
+		case syscall.IFLA_ADDRESS:
+			// An all-zero address (e.g. loopback) is treated as unset.
+			var nonzero bool
+			for _, b := range attr.Value {
+				if b != 0 {
+					nonzero = true
+				}
+			}
+			if nonzero {
+				base.HardwareAddr = attr.Value[:]
+			}
+		case syscall.IFLA_IFNAME:
+			base.Name = string(attr.Value[:len(attr.Value)-1])
+		case syscall.IFLA_MTU:
+			base.MTU = int(native.Uint32(attr.Value[0:4]))
+		case syscall.IFLA_LINK:
+			base.ParentIndex = int(native.Uint32(attr.Value[0:4]))
+		case syscall.IFLA_MASTER:
+			base.MasterIndex = int(native.Uint32(attr.Value[0:4]))
+		case syscall.IFLA_TXQLEN:
+			base.TxQLen = native.Uint32(attr.Value[0:4])
+		}
+	}
+	// Links that don't have IFLA_INFO_KIND are hardware devices
+	if link == nil {
+		link = &Device{}
+	}
+	*link.Attrs() = base
+
+	return link, nil
+}
+
+// LinkList gets a list of link devices.
+// Equivalent to: `ip link show`
+func LinkList() ([]Link, error) {
+	// NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need
+	//             to get the message ourselves to parse link type.
+	req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	req.AddData(msg)
+
+	msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([]Link, 0)
+
+	for _, m := range msgs {
+		link, err := linkDeserialize(m)
+		if err != nil {
+			return nil, err
+		}
+		res = append(res, link)
+	}
+
+	return res, nil
+}
+
+// LinkSetHairpin enables or disables hairpin mode on a bridge port.
+func LinkSetHairpin(link Link, mode bool) error {
+	return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_MODE)
+}
+
+// LinkSetGuard enables or disables BPDU guard on a bridge port.
+func LinkSetGuard(link Link, mode bool) error {
+	return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_GUARD)
+}
+
+// LinkSetFastLeave enables or disables multicast fast-leave on a bridge port.
+func LinkSetFastLeave(link Link, mode bool) error {
+	return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_FAST_LEAVE)
+}
+
+// LinkSetLearning enables or disables MAC address learning on a bridge port.
+func LinkSetLearning(link Link, mode bool) error {
+	return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_LEARNING)
+}
+
+// LinkSetRootBlock enables or disables root-block (BRPORT_PROTECT) on a bridge port.
+func LinkSetRootBlock(link Link, mode bool) error {
+	return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROTECT)
+}
+
+// LinkSetFlood enables or disables unknown-unicast flooding on a bridge port.
+func LinkSetFlood(link Link, mode bool) error {
+	return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_UNICAST_FLOOD)
+}
+
+// setProtinfoAttr toggles a single bridge-port flag (an
+// IFLA_BRPORT_* attribute nested under IFLA_PROTINFO) on the
+// given link via an AF_BRIDGE RTM_SETLINK request.
+func setProtinfoAttr(link Link, mode bool, attr int) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+	msg.Type = syscall.RTM_SETLINK
+	msg.Flags = syscall.NLM_F_REQUEST
+	msg.Index = int32(base.Index)
+	msg.Change = nl.DEFAULT_CHANGE
+	req.AddData(msg)
+
+	// Bridge port options are nested under IFLA_PROTINFO.
+	br := nl.NewRtAttr(syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED, nil)
+	nl.NewRtAttrChild(br, attr, boolToByte(mode))
+	req.AddData(br)
+	// Return the ACK result directly; the original's explicit
+	// `if err != nil { return err }; return nil` was redundant.
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// parseVlanData fills in the Vlan-specific fields of link from the
+// IFLA_INFO_DATA attributes received from the kernel.
+func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) {
+	vlan := link.(*Vlan)
+	for _, datum := range data {
+		if datum.Attr.Type == nl.IFLA_VLAN_ID {
+			vlan.VlanId = int(native.Uint16(datum.Value[0:2]))
+		}
+	}
+}
+
+// parseVxlanData fills in the Vxlan-specific fields of link from the
+// IFLA_INFO_DATA attributes received from the kernel.
+func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) {
+	vxlan := link.(*Vxlan)
+	for _, datum := range data {
+		switch datum.Attr.Type {
+		case nl.IFLA_VXLAN_ID:
+			vxlan.VxlanId = int(native.Uint32(datum.Value[0:4]))
+		case nl.IFLA_VXLAN_LINK:
+			vxlan.VtepDevIndex = int(native.Uint32(datum.Value[0:4]))
+		case nl.IFLA_VXLAN_LOCAL:
+			vxlan.SrcAddr = net.IP(datum.Value[0:4])
+		case nl.IFLA_VXLAN_LOCAL6:
+			vxlan.SrcAddr = net.IP(datum.Value[0:16])
+		case nl.IFLA_VXLAN_GROUP:
+			vxlan.Group = net.IP(datum.Value[0:4])
+		case nl.IFLA_VXLAN_GROUP6:
+			vxlan.Group = net.IP(datum.Value[0:16])
+		case nl.IFLA_VXLAN_TTL:
+			vxlan.TTL = int(datum.Value[0])
+		case nl.IFLA_VXLAN_TOS:
+			vxlan.TOS = int(datum.Value[0])
+		case nl.IFLA_VXLAN_LEARNING:
+			vxlan.Learning = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_PROXY:
+			vxlan.Proxy = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_RSC:
+			vxlan.RSC = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_L2MISS:
+			vxlan.L2miss = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_L3MISS:
+			vxlan.L3miss = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_AGEING:
+			vxlan.Age = int(native.Uint32(datum.Value[0:4]))
+			vxlan.NoAge = vxlan.Age == 0
+		case nl.IFLA_VXLAN_LIMIT:
+			vxlan.Limit = int(native.Uint32(datum.Value[0:4]))
+		case nl.IFLA_VXLAN_PORT:
+			vxlan.Port = int(native.Uint16(datum.Value[0:2]))
+		case nl.IFLA_VXLAN_PORT_RANGE:
+			buf := bytes.NewBuffer(datum.Value[0:4])
+			var pr vxlanPortRange
+			// BUG FIX: the original tested `binary.Read(...) != nil`,
+			// so the port range was only stored when decoding FAILED
+			// (and then from a zero-valued struct). Store it on
+			// success instead.
+			if binary.Read(buf, binary.BigEndian, &pr) == nil {
+				vxlan.PortLow = int(pr.Lo)
+				vxlan.PortHigh = int(pr.Hi)
+			}
+		}
+	}
+}
+
+// parseIPVlanData fills in the IPVlan-specific fields of link from the
+// IFLA_INFO_DATA attributes received from the kernel.
+func parseIPVlanData(link Link, data []syscall.NetlinkRouteAttr) {
+	ipv := link.(*IPVlan)
+	for _, datum := range data {
+		if datum.Attr.Type == nl.IFLA_IPVLAN_MODE {
+			ipv.Mode = IPVlanMode(native.Uint32(datum.Value[0:4]))
+			return
+		}
+	}
+}
+
+// copied from pkg/net_linux.go
+// linkFlags converts raw kernel IFF_* flag bits into net.Flags.
+func linkFlags(rawFlags uint32) net.Flags {
+	conversions := []struct {
+		raw  uint32
+		flag net.Flags
+	}{
+		{syscall.IFF_UP, net.FlagUp},
+		{syscall.IFF_BROADCAST, net.FlagBroadcast},
+		{syscall.IFF_LOOPBACK, net.FlagLoopback},
+		{syscall.IFF_POINTOPOINT, net.FlagPointToPoint},
+		{syscall.IFF_MULTICAST, net.FlagMulticast},
+	}
+	var f net.Flags
+	for _, c := range conversions {
+		if rawFlags&c.raw != 0 {
+			f |= c.flag
+		}
+	}
+	return f
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/link_test.go b/vendor/src/github.com/vishvananda/netlink/link_test.go
new file mode 100644
index 0000000000..05b8e95586
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/link_test.go
@@ -0,0 +1,531 @@
+package netlink
+
+import (
+ "bytes"
+ "net"
+ "testing"
+
+ "github.com/vishvananda/netns"
+)
+
+const testTxQLen uint32 = 100
+
+// testLinkAddDel adds the given link, reads it back via LinkByName,
+// verifies that type-specific attributes round-trip, then deletes it
+// and checks the total link count returns to the baseline.
+func testLinkAddDel(t *testing.T, link Link) {
+	links, err := LinkList()
+	if err != nil {
+		t.Fatal(err)
+	}
+	num := len(links)
+
+	if err := LinkAdd(link); err != nil {
+		t.Fatal(err)
+	}
+
+	base := link.Attrs()
+
+	result, err := LinkByName(base.Name)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rBase := result.Attrs()
+
+	if vlan, ok := link.(*Vlan); ok {
+		other, ok := result.(*Vlan)
+		if !ok {
+			t.Fatal("Result of create is not a vlan")
+		}
+		if vlan.VlanId != other.VlanId {
+			t.Fatal("Link.VlanId id doesn't match")
+		}
+	}
+
+	if rBase.ParentIndex == 0 && base.ParentIndex != 0 {
+		t.Fatal("Created link doesn't have a Parent but it should")
+	} else if rBase.ParentIndex != 0 && base.ParentIndex == 0 {
+		t.Fatal("Created link has a Parent but it shouldn't")
+	} else if rBase.ParentIndex != 0 && base.ParentIndex != 0 {
+		if rBase.ParentIndex != base.ParentIndex {
+			t.Fatal("Link.ParentIndex doesn't match")
+		}
+	}
+
+	if veth, ok := link.(*Veth); ok {
+		// NOTE(review): this asserts TxQLen on the *input* struct, not
+		// on the result fetched from the kernel — confirm intended.
+		if veth.TxQLen != testTxQLen {
+			t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, testTxQLen)
+		}
+		if rBase.MTU != base.MTU {
+			t.Fatalf("MTU is %d, should be %d", rBase.MTU, base.MTU)
+		}
+
+		if veth.PeerName != "" {
+			var peer *Veth
+			other, err := LinkByName(veth.PeerName)
+			if err != nil {
+				t.Fatalf("Peer %s not created", veth.PeerName)
+			}
+			if peer, ok = other.(*Veth); !ok {
+				t.Fatalf("Peer %s is incorrect type", veth.PeerName)
+			}
+			if peer.TxQLen != testTxQLen {
+				t.Fatalf("TxQLen of peer is %d, should be %d", peer.TxQLen, testTxQLen)
+			}
+		}
+	}
+
+	if vxlan, ok := link.(*Vxlan); ok {
+		other, ok := result.(*Vxlan)
+		if !ok {
+			t.Fatal("Result of create is not a vxlan")
+		}
+		compareVxlan(t, vxlan, other)
+	}
+
+	if ipv, ok := link.(*IPVlan); ok {
+		other, ok := result.(*IPVlan)
+		if !ok {
+			t.Fatal("Result of create is not a ipvlan")
+		}
+		if ipv.Mode != other.Mode {
+			t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, ipv.Mode)
+		}
+	}
+
+	if err = LinkDel(link); err != nil {
+		t.Fatal(err)
+	}
+
+	links, err = LinkList()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(links) != num {
+		t.Fatal("Link not removed properly")
+	}
+}
+
+// compareVxlan fails the test if actual's vxlan attributes differ from
+// expected's; optional fields are only compared when set in expected.
+func compareVxlan(t *testing.T, expected, actual *Vxlan) {
+
+	if actual.VxlanId != expected.VxlanId {
+		t.Fatal("Vxlan.VxlanId doesn't match")
+	}
+	if expected.SrcAddr != nil && !actual.SrcAddr.Equal(expected.SrcAddr) {
+		t.Fatal("Vxlan.SrcAddr doesn't match")
+	}
+	if expected.Group != nil && !actual.Group.Equal(expected.Group) {
+		t.Fatal("Vxlan.Group doesn't match")
+	}
+	if expected.TTL != -1 && actual.TTL != expected.TTL {
+		t.Fatal("Vxlan.TTL doesn't match")
+	}
+	if expected.TOS != -1 && actual.TOS != expected.TOS {
+		t.Fatal("Vxlan.TOS doesn't match")
+	}
+	if actual.Learning != expected.Learning {
+		t.Fatal("Vxlan.Learning doesn't match")
+	}
+	if actual.Proxy != expected.Proxy {
+		t.Fatal("Vxlan.Proxy doesn't match")
+	}
+	if actual.RSC != expected.RSC {
+		t.Fatal("Vxlan.RSC doesn't match")
+	}
+	if actual.L2miss != expected.L2miss {
+		t.Fatal("Vxlan.L2miss doesn't match")
+	}
+	if actual.L3miss != expected.L3miss {
+		t.Fatal("Vxlan.L3miss doesn't match")
+	}
+	if expected.NoAge {
+		if !actual.NoAge {
+			t.Fatal("Vxlan.NoAge doesn't match")
+		}
+	} else if expected.Age > 0 && actual.Age != expected.Age {
+		t.Fatal("Vxlan.Age doesn't match")
+	}
+	if expected.Limit > 0 && actual.Limit != expected.Limit {
+		t.Fatal("Vxlan.Limit doesn't match")
+	}
+	if expected.Port > 0 && actual.Port != expected.Port {
+		t.Fatal("Vxlan.Port doesn't match")
+	}
+	if expected.PortLow > 0 || expected.PortHigh > 0 {
+		if actual.PortLow != expected.PortLow {
+			t.Fatal("Vxlan.PortLow doesn't match")
+		}
+		if actual.PortHigh != expected.PortHigh {
+			t.Fatal("Vxlan.PortHigh doesn't match")
+		}
+	}
+}
+
+// TestLinkAddDelDummy round-trips a dummy link through add/delete.
+func TestLinkAddDelDummy(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	testLinkAddDel(t, &Dummy{LinkAttrs{Name: "foo"}})
+}
+
+// TestLinkAddDelBridge round-trips a bridge link (with MTU) through add/delete.
+func TestLinkAddDelBridge(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	testLinkAddDel(t, &Bridge{LinkAttrs{Name: "foo", MTU: 1400}})
+}
+
+// TestLinkAddDelVlan round-trips a vlan link on a dummy parent.
+func TestLinkAddDelVlan(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	parent := &Dummy{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(parent); err != nil {
+		t.Fatal(err)
+	}
+
+	testLinkAddDel(t, &Vlan{LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, 900})
+
+	if err := LinkDel(parent); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestLinkAddDelMacvlan round-trips a macvlan link on a dummy parent.
+func TestLinkAddDelMacvlan(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	parent := &Dummy{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(parent); err != nil {
+		t.Fatal(err)
+	}
+
+	testLinkAddDel(t, &Macvlan{LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}})
+
+	if err := LinkDel(parent); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestLinkAddDelVeth round-trips a veth pair, checking TxQLen and MTU.
+func TestLinkAddDelVeth(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	testLinkAddDel(t, &Veth{LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1400}, "bar"})
+}
+
+// TestLinkAddDelBridgeMaster verifies that a link created with
+// MasterIndex set is enslaved to the bridge after LinkAdd.
+func TestLinkAddDelBridgeMaster(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	master := &Bridge{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(master); err != nil {
+		t.Fatal(err)
+	}
+	testLinkAddDel(t, &Dummy{LinkAttrs{Name: "bar", MasterIndex: master.Attrs().Index}})
+
+	if err := LinkDel(master); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestLinkSetUnsetResetMaster moves a slave between two bridges and
+// finally detaches it (nil master), checking MasterIndex each time.
+func TestLinkSetUnsetResetMaster(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	master := &Bridge{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(master); err != nil {
+		t.Fatal(err)
+	}
+
+	newmaster := &Bridge{LinkAttrs{Name: "bar"}}
+	if err := LinkAdd(newmaster); err != nil {
+		t.Fatal(err)
+	}
+
+	slave := &Dummy{LinkAttrs{Name: "baz"}}
+	if err := LinkAdd(slave); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := LinkSetMaster(slave, master); err != nil {
+		t.Fatal(err)
+	}
+
+	link, err := LinkByName("baz")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if link.Attrs().MasterIndex != master.Attrs().Index {
+		t.Fatal("Master not set properly")
+	}
+
+	if err := LinkSetMaster(slave, newmaster); err != nil {
+		t.Fatal(err)
+	}
+
+	link, err = LinkByName("baz")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if link.Attrs().MasterIndex != newmaster.Attrs().Index {
+		t.Fatal("Master not reset properly")
+	}
+
+	// nil master means detach (MasterIndex 0).
+	if err := LinkSetMaster(slave, nil); err != nil {
+		t.Fatal(err)
+	}
+
+	link, err = LinkByName("baz")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if link.Attrs().MasterIndex != 0 {
+		t.Fatal("Master not unset properly")
+	}
+	if err := LinkDel(slave); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := LinkDel(newmaster); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := LinkDel(master); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestLinkSetNs creates a veth pair inside a fresh namespace, moves one
+// end back to the base namespace via LinkSetNsFd, and verifies that
+// deleting that end also removes its peer in the other namespace.
+func TestLinkSetNs(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	basens, err := netns.Get()
+	if err != nil {
+		t.Fatal("Failed to get basens")
+	}
+	defer basens.Close()
+
+	newns, err := netns.New()
+	if err != nil {
+		t.Fatal("Failed to create newns")
+	}
+	defer newns.Close()
+
+	link := &Veth{LinkAttrs{Name: "foo"}, "bar"}
+	if err := LinkAdd(link); err != nil {
+		t.Fatal(err)
+	}
+
+	peer, err := LinkByName("bar")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// BUG FIX: the original discarded LinkSetNsFd's return value, so
+	// the following error check tested a stale nil error and could
+	// never fire.
+	err = LinkSetNsFd(peer, int(basens))
+	if err != nil {
+		t.Fatal("Failed to set newns for link")
+	}
+
+	_, err = LinkByName("bar")
+	if err == nil {
+		t.Fatal("Link bar is still in newns")
+	}
+
+	err = netns.Set(basens)
+	if err != nil {
+		t.Fatal("Failed to set basens")
+	}
+
+	peer, err = LinkByName("bar")
+	if err != nil {
+		t.Fatal("Link is not in basens")
+	}
+
+	if err := LinkDel(peer); err != nil {
+		t.Fatal(err)
+	}
+
+	err = netns.Set(newns)
+	if err != nil {
+		t.Fatal("Failed to set newns")
+	}
+
+	_, err = LinkByName("foo")
+	if err == nil {
+		t.Fatal("Other half of veth pair not deleted")
+	}
+
+}
+
+// TestLinkAddDelVxlan round-trips a vxlan link whose VTEP is a dummy
+// parent device.
+func TestLinkAddDelVxlan(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	parent := &Dummy{
+		LinkAttrs{Name: "foo"},
+	}
+	if err := LinkAdd(parent); err != nil {
+		t.Fatal(err)
+	}
+
+	vxlan := Vxlan{
+		LinkAttrs: LinkAttrs{
+			Name: "bar",
+		},
+		VxlanId:      10,
+		VtepDevIndex: parent.Index,
+		Learning:     true,
+		L2miss:       true,
+		L3miss:       true,
+	}
+
+	testLinkAddDel(t, &vxlan)
+	if err := LinkDel(parent); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestLinkAddDelIPVlanL2 round-trips an ipvlan link in L2 mode.
+// NOTE(review): unlike the other parented tests, the dummy parent is
+// not deleted here — confirm the test teardown covers it.
+func TestLinkAddDelIPVlanL2(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+	parent := &Dummy{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(parent); err != nil {
+		t.Fatal(err)
+	}
+
+	ipv := IPVlan{
+		LinkAttrs: LinkAttrs{
+			Name:        "bar",
+			ParentIndex: parent.Index,
+		},
+		Mode: IPVLAN_MODE_L2,
+	}
+
+	testLinkAddDel(t, &ipv)
+}
+
+// TestLinkAddDelIPVlanL3 round-trips an ipvlan link in L3 mode.
+func TestLinkAddDelIPVlanL3(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+	parent := &Dummy{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(parent); err != nil {
+		t.Fatal(err)
+	}
+
+	ipv := IPVlan{
+		LinkAttrs: LinkAttrs{
+			Name:        "bar",
+			ParentIndex: parent.Index,
+		},
+		Mode: IPVLAN_MODE_L3,
+	}
+
+	testLinkAddDel(t, &ipv)
+}
+
+// TestLinkAddDelIPVlanNoParent verifies that LinkAdd refuses to create
+// an ipvlan link when ParentIndex is unset.
+func TestLinkAddDelIPVlanNoParent(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	ipv := IPVlan{
+		LinkAttrs: LinkAttrs{
+			Name: "bar",
+		},
+		Mode: IPVLAN_MODE_L3,
+	}
+	err := LinkAdd(&ipv)
+	if err == nil {
+		t.Fatal("Add should fail if ipvlan creating without ParentIndex")
+	}
+	if err.Error() != "Can't create ipvlan link without ParentIndex" {
+		t.Fatalf("Error should be about missing ParentIndex, got %q", err)
+	}
+}
+
+// TestLinkByIndex verifies lookup by ifindex for an existing link and
+// that lookup fails after the link has been deleted.
+func TestLinkByIndex(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	dummy := &Dummy{LinkAttrs{Name: "dummy"}}
+	if err := LinkAdd(dummy); err != nil {
+		t.Fatal(err)
+	}
+
+	found, err := LinkByIndex(dummy.Index)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if found.Attrs().Index != dummy.Attrs().Index {
+		t.Fatalf("Indices don't match: %v != %v", found.Attrs().Index, dummy.Attrs().Index)
+	}
+
+	// BUG FIX: the delete error was silently discarded before.
+	if err := LinkDel(dummy); err != nil {
+		t.Fatal(err)
+	}
+
+	// test not found
+	_, err = LinkByIndex(dummy.Attrs().Index)
+	if err == nil {
+		// BUG FIX: the original formatted err (nil in this branch)
+		// instead of the index being looked up.
+		t.Fatalf("LinkByIndex(%v) found deleted link", dummy.Attrs().Index)
+	}
+}
+
+// TestLinkSet exercises LinkSetName, LinkSetMTU and
+// LinkSetHardwareAddr on a dummy link, re-reading the link after each
+// change to confirm it took effect.
+func TestLinkSet(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	iface := &Dummy{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(iface); err != nil {
+		t.Fatal(err)
+	}
+
+	link, err := LinkByName("foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = LinkSetName(link, "bar")
+	if err != nil {
+		t.Fatalf("Could not change interface name: %v", err)
+	}
+
+	link, err = LinkByName("bar")
+	if err != nil {
+		t.Fatalf("Interface name not changed: %v", err)
+	}
+
+	err = LinkSetMTU(link, 1400)
+	if err != nil {
+		t.Fatalf("Could not set MTU: %v", err)
+	}
+
+	link, err = LinkByName("bar")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if link.Attrs().MTU != 1400 {
+		t.Fatal("MTU not changed!")
+	}
+
+	addr, err := net.ParseMAC("00:12:34:56:78:AB")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = LinkSetHardwareAddr(link, addr)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	link, err = LinkByName("bar")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(link.Attrs().HardwareAddr, addr) {
+		t.Fatalf("hardware address not changed!")
+	}
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/neigh.go b/vendor/src/github.com/vishvananda/netlink/neigh.go
new file mode 100644
index 0000000000..0e5eb90c9e
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/neigh.go
@@ -0,0 +1,22 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+)
+
+// Neigh represents a link layer neighbor from netlink.
+type Neigh struct {
+	LinkIndex    int
+	Family       int
+	State        int
+	Type         int
+	Flags        int
+	IP           net.IP
+	HardwareAddr net.HardwareAddr
+}
+
+// String returns the neighbor's IP and hardware address separated by
+// a space ("$ip $hwaddr").
+func (neigh *Neigh) String() string {
+	return fmt.Sprintf("%s %s", neigh.IP, neigh.HardwareAddr)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/neigh_linux.go b/vendor/src/github.com/vishvananda/netlink/neigh_linux.go
new file mode 100644
index 0000000000..1fdaa3a37e
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/neigh_linux.go
@@ -0,0 +1,189 @@
+package netlink
+
+import (
+ "net"
+ "syscall"
+ "unsafe"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+const (
+ NDA_UNSPEC = iota
+ NDA_DST
+ NDA_LLADDR
+ NDA_CACHEINFO
+ NDA_PROBES
+ NDA_VLAN
+ NDA_PORT
+ NDA_VNI
+ NDA_IFINDEX
+ NDA_MAX = NDA_IFINDEX
+)
+
+// Neighbor Cache Entry States.
+const (
+ NUD_NONE = 0x00
+ NUD_INCOMPLETE = 0x01
+ NUD_REACHABLE = 0x02
+ NUD_STALE = 0x04
+ NUD_DELAY = 0x08
+ NUD_PROBE = 0x10
+ NUD_FAILED = 0x20
+ NUD_NOARP = 0x40
+ NUD_PERMANENT = 0x80
+)
+
+// Neighbor Flags
+const (
+ NTF_USE = 0x01
+ NTF_SELF = 0x02
+ NTF_MASTER = 0x04
+ NTF_PROXY = 0x08
+ NTF_ROUTER = 0x80
+)
+
+type Ndmsg struct {
+ Family uint8
+ Index uint32
+ State uint16
+ Flags uint8
+ Type uint8
+}
+
+func deserializeNdmsg(b []byte) *Ndmsg {
+ var dummy Ndmsg
+ return (*Ndmsg)(unsafe.Pointer(&b[0:unsafe.Sizeof(dummy)][0]))
+}
+
+func (msg *Ndmsg) Serialize() []byte {
+ return (*(*[unsafe.Sizeof(*msg)]byte)(unsafe.Pointer(msg)))[:]
+}
+
+func (msg *Ndmsg) Len() int {
+ return int(unsafe.Sizeof(*msg))
+}
+
+// NeighAdd will add an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh add ....`
+func NeighAdd(neigh *Neigh) error {
+ return neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL)
+}
+
+// NeighSet will add or replace an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh replace....`
+func NeighSet(neigh *Neigh) error {
+ return neighAdd(neigh, syscall.NLM_F_CREATE)
+}
+
+// NeighAppend will append an entry to FDB
+// Equivalent to: `bridge fdb append...`
+func NeighAppend(neigh *Neigh) error {
+ return neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_APPEND)
+}
+
+func neighAdd(neigh *Neigh, mode int) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_NEWNEIGH, mode|syscall.NLM_F_ACK)
+ return neighHandle(neigh, req)
+}
+
+// NeighDel will delete an IP to MAC mapping from the ARP table.
+// Equivalent to: `ip neigh del ...`
+func NeighDel(neigh *Neigh) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_DELNEIGH, syscall.NLM_F_ACK)
+ return neighHandle(neigh, req)
+}
+
+func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error {
+ var family int
+ if neigh.Family > 0 {
+ family = neigh.Family
+ } else {
+ family = nl.GetIPFamily(neigh.IP)
+ }
+
+ msg := Ndmsg{
+ Family: uint8(family),
+ Index: uint32(neigh.LinkIndex),
+ State: uint16(neigh.State),
+ Type: uint8(neigh.Type),
+ Flags: uint8(neigh.Flags),
+ }
+ req.AddData(&msg)
+
+ ipData := neigh.IP.To4()
+ if ipData == nil {
+ ipData = neigh.IP.To16()
+ }
+
+ dstData := nl.NewRtAttr(NDA_DST, ipData)
+ req.AddData(dstData)
+
+ hwData := nl.NewRtAttr(NDA_LLADDR, []byte(neigh.HardwareAddr))
+ req.AddData(hwData)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// NeighList gets a list of IP-MAC mappings in the system (ARP table).
+// Equivalent to: `ip neighbor show`.
+// The list can be filtered by link and ip family.
+func NeighList(linkIndex, family int) ([]Neigh, error) {
+ req := nl.NewNetlinkRequest(syscall.RTM_GETNEIGH, syscall.NLM_F_DUMP)
+ msg := Ndmsg{
+ Family: uint8(family),
+ }
+ req.AddData(&msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWNEIGH)
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]Neigh, 0)
+ for _, m := range msgs {
+ ndm := deserializeNdmsg(m)
+ if linkIndex != 0 && int(ndm.Index) != linkIndex {
+ // Ignore messages from other interfaces
+ continue
+ }
+
+ neigh, err := NeighDeserialize(m)
+ if err != nil {
+ continue
+ }
+
+ res = append(res, *neigh)
+ }
+
+ return res, nil
+}
+
+func NeighDeserialize(m []byte) (*Neigh, error) {
+ msg := deserializeNdmsg(m)
+
+ neigh := Neigh{
+ LinkIndex: int(msg.Index),
+ Family: int(msg.Family),
+ State: int(msg.State),
+ Type: int(msg.Type),
+ Flags: int(msg.Flags),
+ }
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case NDA_DST:
+ neigh.IP = net.IP(attr.Value)
+ case NDA_LLADDR:
+ neigh.HardwareAddr = net.HardwareAddr(attr.Value)
+ }
+ }
+
+ return &neigh, nil
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/neigh_test.go b/vendor/src/github.com/vishvananda/netlink/neigh_test.go
new file mode 100644
index 0000000000..50da59c5c5
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/neigh_test.go
@@ -0,0 +1,104 @@
+package netlink
+
+import (
+ "net"
+ "testing"
+)
+
+type arpEntry struct {
+ ip net.IP
+ mac net.HardwareAddr
+}
+
+func parseMAC(s string) net.HardwareAddr {
+ m, err := net.ParseMAC(s)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+func dumpContains(dump []Neigh, e arpEntry) bool {
+ for _, n := range dump {
+ if n.IP.Equal(e.ip) && (n.State&NUD_INCOMPLETE) == 0 {
+ return true
+ }
+ }
+ return false
+}
+
+func TestNeighAddDel(t *testing.T) {
+ tearDown := setUpNetlinkTest(t)
+ defer tearDown()
+
+ dummy := Dummy{LinkAttrs{Name: "neigh0"}}
+ if err := LinkAdd(&dummy); err != nil {
+ t.Fatal(err)
+ }
+
+ ensureIndex(dummy.Attrs())
+
+ arpTable := []arpEntry{
+ {net.ParseIP("10.99.0.1"), parseMAC("aa:bb:cc:dd:00:01")},
+ {net.ParseIP("10.99.0.2"), parseMAC("aa:bb:cc:dd:00:02")},
+ {net.ParseIP("10.99.0.3"), parseMAC("aa:bb:cc:dd:00:03")},
+ {net.ParseIP("10.99.0.4"), parseMAC("aa:bb:cc:dd:00:04")},
+ {net.ParseIP("10.99.0.5"), parseMAC("aa:bb:cc:dd:00:05")},
+ }
+
+ // Add the arpTable
+ for _, entry := range arpTable {
+ err := NeighAdd(&Neigh{
+ LinkIndex: dummy.Index,
+ State: NUD_REACHABLE,
+ IP: entry.ip,
+ HardwareAddr: entry.mac,
+ })
+
+ if err != nil {
+ t.Errorf("Failed to NeighAdd: %v", err)
+ }
+ }
+
+ // Dump and see that all added entries are there
+ dump, err := NeighList(dummy.Index, 0)
+ if err != nil {
+ t.Errorf("Failed to NeighList: %v", err)
+ }
+
+ for _, entry := range arpTable {
+ if !dumpContains(dump, entry) {
+ t.Errorf("Dump does not contain: %v", entry)
+ }
+ }
+
+ // Delete the arpTable
+ for _, entry := range arpTable {
+ err := NeighDel(&Neigh{
+ LinkIndex: dummy.Index,
+ IP: entry.ip,
+ HardwareAddr: entry.mac,
+ })
+
+ if err != nil {
+ t.Errorf("Failed to NeighDel: %v", err)
+ }
+ }
+
+ // TODO: seems not working because of cache
+ //// Dump and see that none of deleted entries are there
+ //dump, err = NeighList(dummy.Index, 0)
+ //if err != nil {
+ //t.Errorf("Failed to NeighList: %v", err)
+ //}
+
+ //for _, entry := range arpTable {
+ //if dumpContains(dump, entry) {
+ //t.Errorf("Dump contains: %v", entry)
+ //}
+ //}
+
+ if err := LinkDel(&dummy); err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/netlink.go b/vendor/src/github.com/vishvananda/netlink/netlink.go
new file mode 100644
index 0000000000..41ebdb11f1
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/netlink.go
@@ -0,0 +1,39 @@
+// Package netlink provides a simple library for netlink. Netlink is
+// the interface a user-space program in linux uses to communicate with
+// the kernel. It can be used to add and remove interfaces, set up ip
+// addresses and routes, and configure ipsec. Netlink communication
+// requires elevated privileges, so in most cases this code needs to
+// be run as root. The low level primitives for netlink are contained
+// in the nl subpackage. This package attempts to provide a high-level
+// interface that is loosely modeled on the iproute2 cli.
+package netlink
+
+import (
+ "net"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+const (
+ // Family type definitions
+ FAMILY_ALL = nl.FAMILY_ALL
+ FAMILY_V4 = nl.FAMILY_V4
+ FAMILY_V6 = nl.FAMILY_V6
+)
+
+// ParseIPNet parses a string in ip/net format and returns a net.IPNet.
+// This is valuable because addresses in netlink are often IPNets and
+// ParseCIDR returns an IPNet with the IP part set to the base IP of the
+// range.
+func ParseIPNet(s string) (*net.IPNet, error) {
+ ip, ipNet, err := net.ParseCIDR(s)
+ if err != nil {
+ return nil, err
+ }
+ return &net.IPNet{IP: ip, Mask: ipNet.Mask}, nil
+}
+
+// NewIPNet generates an IPNet from an ip address using a netmask of 32.
+func NewIPNet(ip net.IP) *net.IPNet {
+ return &net.IPNet{IP: ip, Mask: net.CIDRMask(32, 32)}
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/netlink_test.go b/vendor/src/github.com/vishvananda/netlink/netlink_test.go
new file mode 100644
index 0000000000..3292b750a9
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/netlink_test.go
@@ -0,0 +1,34 @@
+package netlink
+
+import (
+ "log"
+ "os"
+ "runtime"
+ "testing"
+
+ "github.com/vishvananda/netns"
+)
+
+type tearDownNetlinkTest func()
+
+func setUpNetlinkTest(t *testing.T) tearDownNetlinkTest {
+	if os.Getuid() != 0 {
+		msg := "Skipped test because it requires root privileges."
+		log.Print(msg)
+		t.Skip(msg)
+	}
+
+	// new temporary namespace so we don't pollute the host
+	// lock thread since the namespace is thread local
+	runtime.LockOSThread()
+	ns, err := netns.New()
+	if err != nil {
+		runtime.UnlockOSThread()
+		t.Fatal("Failed to create newns", err)
+	}
+
+	return func() {
+		ns.Close()
+		runtime.UnlockOSThread()
+	}
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/src/github.com/vishvananda/netlink/netlink_unspecified.go
new file mode 100644
index 0000000000..10c49c1bfc
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/netlink_unspecified.go
@@ -0,0 +1,143 @@
+// +build !linux
+
+package netlink
+
+import (
+ "errors"
+)
+
+var (
+ ErrNotImplemented = errors.New("not implemented")
+)
+
+func LinkSetUp(link *Link) error {
+ return ErrNotImplemented
+}
+
+func LinkSetDown(link *Link) error {
+ return ErrNotImplemented
+}
+
+func LinkSetMTU(link *Link, mtu int) error {
+ return ErrNotImplemented
+}
+
+func LinkSetMaster(link *Link, master *Link) error {
+ return ErrNotImplemented
+}
+
+func LinkSetNsPid(link *Link, nspid int) error {
+ return ErrNotImplemented
+}
+
+func LinkSetNsFd(link *Link, fd int) error {
+ return ErrNotImplemented
+}
+
+func LinkAdd(link *Link) error {
+ return ErrNotImplemented
+}
+
+func LinkDel(link *Link) error {
+ return ErrNotImplemented
+}
+
+func SetHairpin(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetGuard(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetFastLeave(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetLearning(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetRootBlock(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func SetFlood(link Link, mode bool) error {
+ return ErrNotImplemented
+}
+
+func LinkList() ([]Link, error) {
+ return nil, ErrNotImplemented
+}
+
+func AddrAdd(link *Link, addr *Addr) error {
+ return ErrNotImplemented
+}
+
+func AddrDel(link *Link, addr *Addr) error {
+ return ErrNotImplemented
+}
+
+func AddrList(link *Link, family int) ([]Addr, error) {
+ return nil, ErrNotImplemented
+}
+
+func RouteAdd(route *Route) error {
+ return ErrNotImplemented
+}
+
+func RouteDel(route *Route) error {
+ return ErrNotImplemented
+}
+
+func RouteList(link *Link, family int) ([]Route, error) {
+ return nil, ErrNotImplemented
+}
+
+func XfrmPolicyAdd(policy *XfrmPolicy) error {
+ return ErrNotImplemented
+}
+
+func XfrmPolicyDel(policy *XfrmPolicy) error {
+ return ErrNotImplemented
+}
+
+func XfrmPolicyList(family int) ([]XfrmPolicy, error) {
+ return nil, ErrNotImplemented
+}
+
+func XfrmStateAdd(policy *XfrmState) error {
+ return ErrNotImplemented
+}
+
+func XfrmStateDel(policy *XfrmState) error {
+ return ErrNotImplemented
+}
+
+func XfrmStateList(family int) ([]XfrmState, error) {
+ return nil, ErrNotImplemented
+}
+
+func NeighAdd(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func NeighSet(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func NeighAppend(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func NeighDel(neigh *Neigh) error {
+ return ErrNotImplemented
+}
+
+func NeighList(linkIndex, family int) ([]Neigh, error) {
+ return nil, ErrNotImplemented
+}
+
+// NeighDeserialize matches the linux implementation's signature; Ndmsg is
+// declared only in neigh_linux.go and must not leak into non-linux builds.
+func NeighDeserialize(m []byte) (*Neigh, error) {
+	return nil, ErrNotImplemented
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/addr_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/addr_linux.go
new file mode 100644
index 0000000000..17088fa0c0
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/addr_linux.go
@@ -0,0 +1,47 @@
+package nl
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type IfAddrmsg struct {
+ syscall.IfAddrmsg
+}
+
+func NewIfAddrmsg(family int) *IfAddrmsg {
+ return &IfAddrmsg{
+ IfAddrmsg: syscall.IfAddrmsg{
+ Family: uint8(family),
+ },
+ }
+}
+
+// struct ifaddrmsg {
+// __u8 ifa_family;
+// __u8 ifa_prefixlen; /* The prefix length */
+// __u8 ifa_flags; /* Flags */
+// __u8 ifa_scope; /* Address scope */
+// __u32 ifa_index; /* Link index */
+// };
+
+// type IfAddrmsg struct {
+// Family uint8
+// Prefixlen uint8
+// Flags uint8
+// Scope uint8
+// Index uint32
+// }
+// SizeofIfAddrmsg = 0x8
+
+func DeserializeIfAddrmsg(b []byte) *IfAddrmsg {
+ return (*IfAddrmsg)(unsafe.Pointer(&b[0:syscall.SizeofIfAddrmsg][0]))
+}
+
+func (msg *IfAddrmsg) Serialize() []byte {
+ return (*(*[syscall.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:]
+}
+
+func (msg *IfAddrmsg) Len() int {
+ return syscall.SizeofIfAddrmsg
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/addr_linux_test.go b/vendor/src/github.com/vishvananda/netlink/nl/addr_linux_test.go
new file mode 100644
index 0000000000..98c3b211f3
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/addr_linux_test.go
@@ -0,0 +1,39 @@
+package nl
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/binary"
+ "syscall"
+ "testing"
+)
+
+func (msg *IfAddrmsg) write(b []byte) {
+ native := NativeEndian()
+ b[0] = msg.Family
+ b[1] = msg.Prefixlen
+ b[2] = msg.Flags
+ b[3] = msg.Scope
+ native.PutUint32(b[4:8], msg.Index)
+}
+
+func (msg *IfAddrmsg) serializeSafe() []byte {
+ len := syscall.SizeofIfAddrmsg
+ b := make([]byte, len)
+ msg.write(b)
+ return b
+}
+
+func deserializeIfAddrmsgSafe(b []byte) *IfAddrmsg {
+ var msg = IfAddrmsg{}
+ binary.Read(bytes.NewReader(b[0:syscall.SizeofIfAddrmsg]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestIfAddrmsgDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, syscall.SizeofIfAddrmsg)
+ rand.Read(orig)
+ safemsg := deserializeIfAddrmsgSafe(orig)
+ msg := DeserializeIfAddrmsg(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/link_linux.go
new file mode 100644
index 0000000000..ab0dede65d
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/link_linux.go
@@ -0,0 +1,81 @@
+package nl
+
+const (
+ DEFAULT_CHANGE = 0xFFFFFFFF
+)
+
+const (
+ IFLA_INFO_UNSPEC = iota
+ IFLA_INFO_KIND
+ IFLA_INFO_DATA
+ IFLA_INFO_XSTATS
+ IFLA_INFO_MAX = IFLA_INFO_XSTATS
+)
+
+const (
+ IFLA_VLAN_UNSPEC = iota
+ IFLA_VLAN_ID
+ IFLA_VLAN_FLAGS
+ IFLA_VLAN_EGRESS_QOS
+ IFLA_VLAN_INGRESS_QOS
+ IFLA_VLAN_PROTOCOL
+ IFLA_VLAN_MAX = IFLA_VLAN_PROTOCOL
+)
+
+const (
+ VETH_INFO_UNSPEC = iota
+ VETH_INFO_PEER
+ VETH_INFO_MAX = VETH_INFO_PEER
+)
+
+const (
+ IFLA_VXLAN_UNSPEC = iota
+ IFLA_VXLAN_ID
+ IFLA_VXLAN_GROUP
+ IFLA_VXLAN_LINK
+ IFLA_VXLAN_LOCAL
+ IFLA_VXLAN_TTL
+ IFLA_VXLAN_TOS
+ IFLA_VXLAN_LEARNING
+ IFLA_VXLAN_AGEING
+ IFLA_VXLAN_LIMIT
+ IFLA_VXLAN_PORT_RANGE
+ IFLA_VXLAN_PROXY
+ IFLA_VXLAN_RSC
+ IFLA_VXLAN_L2MISS
+ IFLA_VXLAN_L3MISS
+ IFLA_VXLAN_PORT
+ IFLA_VXLAN_GROUP6
+ IFLA_VXLAN_LOCAL6
+ IFLA_VXLAN_MAX = IFLA_VXLAN_LOCAL6
+)
+
+const (
+ BRIDGE_MODE_UNSPEC = iota
+ BRIDGE_MODE_HAIRPIN
+)
+
+const (
+ IFLA_BRPORT_UNSPEC = iota
+ IFLA_BRPORT_STATE
+ IFLA_BRPORT_PRIORITY
+ IFLA_BRPORT_COST
+ IFLA_BRPORT_MODE
+ IFLA_BRPORT_GUARD
+ IFLA_BRPORT_PROTECT
+ IFLA_BRPORT_FAST_LEAVE
+ IFLA_BRPORT_LEARNING
+ IFLA_BRPORT_UNICAST_FLOOD
+ IFLA_BRPORT_MAX = IFLA_BRPORT_UNICAST_FLOOD
+)
+
+const (
+ IFLA_IPVLAN_UNSPEC = iota
+ IFLA_IPVLAN_MODE
+ IFLA_IPVLAN_MAX = IFLA_IPVLAN_MODE
+)
+
+const (
+ // not defined in syscall
+ IFLA_NET_NS_FD = 28
+)
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/nl_linux.go
new file mode 100644
index 0000000000..72f2813773
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/nl_linux.go
@@ -0,0 +1,417 @@
+// Package nl has low level primitives for making Netlink calls.
+package nl
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "net"
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ // Family type definitions
+ FAMILY_ALL = syscall.AF_UNSPEC
+ FAMILY_V4 = syscall.AF_INET
+ FAMILY_V6 = syscall.AF_INET6
+)
+
+var nextSeqNr uint32
+
+// GetIPFamily returns the family type of a net.IP.
+func GetIPFamily(ip net.IP) int {
+ if len(ip) <= net.IPv4len {
+ return FAMILY_V4
+ }
+ if ip.To4() != nil {
+ return FAMILY_V4
+ }
+ return FAMILY_V6
+}
+
+var nativeEndian binary.ByteOrder
+
+// NativeEndian returns the byte order native to the host, cached after the first call.
+func NativeEndian() binary.ByteOrder {
+	if nativeEndian == nil {
+		if x := uint32(0x01020304); *(*byte)(unsafe.Pointer(&x)) == 0x01 {
+			nativeEndian = binary.BigEndian
+		} else {
+			nativeEndian = binary.LittleEndian
+		}
+	}
+	return nativeEndian
+}
+
+// Byte swap a 16 bit value if we aren't big endian
+func Swap16(i uint16) uint16 {
+ if NativeEndian() == binary.BigEndian {
+ return i
+ }
+ return (i&0xff00)>>8 | (i&0xff)<<8
+}
+
+// Byte swap a 32 bit value if we aren't big endian
+func Swap32(i uint32) uint32 {
+ if NativeEndian() == binary.BigEndian {
+ return i
+ }
+ return (i&0xff000000)>>24 | (i&0xff0000)>>8 | (i&0xff00)<<8 | (i&0xff)<<24
+}
+
+type NetlinkRequestData interface {
+ Len() int
+ Serialize() []byte
+}
+
+// IfInfomsg is related to links, but it is used for list requests as well
+type IfInfomsg struct {
+ syscall.IfInfomsg
+}
+
+// Create an IfInfomsg with family specified
+func NewIfInfomsg(family int) *IfInfomsg {
+ return &IfInfomsg{
+ IfInfomsg: syscall.IfInfomsg{
+ Family: uint8(family),
+ },
+ }
+}
+
+func DeserializeIfInfomsg(b []byte) *IfInfomsg {
+ return (*IfInfomsg)(unsafe.Pointer(&b[0:syscall.SizeofIfInfomsg][0]))
+}
+
+func (msg *IfInfomsg) Serialize() []byte {
+ return (*(*[syscall.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:]
+}
+
+func (msg *IfInfomsg) Len() int {
+ return syscall.SizeofIfInfomsg
+}
+
+func rtaAlignOf(attrlen int) int {
+ return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1)
+}
+
+func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg {
+ msg := NewIfInfomsg(family)
+ parent.children = append(parent.children, msg)
+ return msg
+}
+
+// Extend RtAttr to handle data and children
+type RtAttr struct {
+ syscall.RtAttr
+ Data []byte
+ children []NetlinkRequestData
+}
+
+// Create a new Extended RtAttr object
+func NewRtAttr(attrType int, data []byte) *RtAttr {
+ return &RtAttr{
+ RtAttr: syscall.RtAttr{
+ Type: uint16(attrType),
+ },
+ children: []NetlinkRequestData{},
+ Data: data,
+ }
+}
+
+// Create a new RtAttr object and add it as a child of an existing object
+func NewRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr {
+ attr := NewRtAttr(attrType, data)
+ parent.children = append(parent.children, attr)
+ return attr
+}
+
+func (a *RtAttr) Len() int {
+ if len(a.children) == 0 {
+ return (syscall.SizeofRtAttr + len(a.Data))
+ }
+
+ l := 0
+ for _, child := range a.children {
+ l += rtaAlignOf(child.Len())
+ }
+ l += syscall.SizeofRtAttr
+ return rtaAlignOf(l + len(a.Data))
+}
+
+// Serialize the RtAttr into a byte array
+// This can't just use an unsafe cast because it must iterate through children.
+func (a *RtAttr) Serialize() []byte {
+ native := NativeEndian()
+
+ length := a.Len()
+ buf := make([]byte, rtaAlignOf(length))
+
+ if a.Data != nil {
+ copy(buf[4:], a.Data)
+ } else {
+ next := 4
+ for _, child := range a.children {
+ childBuf := child.Serialize()
+ copy(buf[next:], childBuf)
+ next += rtaAlignOf(len(childBuf))
+ }
+ }
+
+ if l := uint16(length); l != 0 {
+ native.PutUint16(buf[0:2], l)
+ }
+ native.PutUint16(buf[2:4], a.Type)
+ return buf
+}
+
+type NetlinkRequest struct {
+ syscall.NlMsghdr
+ Data []NetlinkRequestData
+}
+
+// Serialize the Netlink Request into a byte array
+func (msg *NetlinkRequest) Serialize() []byte {
+ length := syscall.SizeofNlMsghdr
+ dataBytes := make([][]byte, len(msg.Data))
+ for i, data := range msg.Data {
+ dataBytes[i] = data.Serialize()
+ length = length + len(dataBytes[i])
+ }
+ msg.Len = uint32(length)
+ b := make([]byte, length)
+ hdr := (*(*[syscall.SizeofNlMsghdr]byte)(unsafe.Pointer(msg)))[:]
+ next := syscall.SizeofNlMsghdr
+ copy(b[0:next], hdr)
+ for _, data := range dataBytes {
+ for _, dataByte := range data {
+ b[next] = dataByte
+ next = next + 1
+ }
+ }
+ return b
+}
+
+func (msg *NetlinkRequest) AddData(data NetlinkRequestData) {
+ if data != nil {
+ msg.Data = append(msg.Data, data)
+ }
+}
+
+// Execute the request against the given sockType.
+// Returns a list of netlink messages in serialized format, optionally filtered
+// by resType.
+func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, error) {
+	s, err := getNetlinkSocket(sockType)
+	if err != nil {
+		return nil, err
+	}
+	defer s.Close()
+
+	if err := s.Send(req); err != nil {
+		return nil, err
+	}
+
+	pid, err := s.GetPid()
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([][]byte, 0)
+
+done:
+	for {
+		msgs, err := s.Recieve()
+		if err != nil {
+			return nil, err
+		}
+		for _, m := range msgs {
+			if m.Header.Seq != req.Seq {
+				return nil, fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq)
+			}
+			if m.Header.Pid != pid {
+				return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid)
+			}
+			if m.Header.Type == syscall.NLMSG_DONE {
+				break done
+			}
+			if m.Header.Type == syscall.NLMSG_ERROR {
+				native := NativeEndian()
+				errno := int32(native.Uint32(m.Data[0:4]))
+				if errno == 0 {
+					break done
+				}
+				return nil, syscall.Errno(-errno)
+			}
+			if resType != 0 && m.Header.Type != resType {
+				continue
+			}
+			res = append(res, m.Data)
+			if m.Header.Flags&syscall.NLM_F_MULTI == 0 {
+				break done
+			}
+		}
+	}
+	return res, nil
+}
+
+// Create a new netlink request from proto and flags
+// Note the Len value will be inaccurate once data is added until
+// the message is serialized
+func NewNetlinkRequest(proto, flags int) *NetlinkRequest {
+ return &NetlinkRequest{
+ NlMsghdr: syscall.NlMsghdr{
+ Len: uint32(syscall.SizeofNlMsghdr),
+ Type: uint16(proto),
+ Flags: syscall.NLM_F_REQUEST | uint16(flags),
+ Seq: atomic.AddUint32(&nextSeqNr, 1),
+ },
+ }
+}
+
+type NetlinkSocket struct {
+ fd int
+ lsa syscall.SockaddrNetlink
+}
+
+func getNetlinkSocket(protocol int) (*NetlinkSocket, error) {
+ fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, protocol)
+ if err != nil {
+ return nil, err
+ }
+ s := &NetlinkSocket{
+ fd: fd,
+ }
+ s.lsa.Family = syscall.AF_NETLINK
+ if err := syscall.Bind(fd, &s.lsa); err != nil {
+ syscall.Close(fd)
+ return nil, err
+ }
+
+ return s, nil
+}
+
+// Create a netlink socket with a given protocol (e.g. NETLINK_ROUTE)
+// and subscribe it to multicast groups passed in variable argument list.
+// Returns the netlink socket on which the Recieve() method can be called
+// to retrieve the messages from the kernel.
+func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) {
+ fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, protocol)
+ if err != nil {
+ return nil, err
+ }
+ s := &NetlinkSocket{
+ fd: fd,
+ }
+ s.lsa.Family = syscall.AF_NETLINK
+
+ for _, g := range groups {
+ s.lsa.Groups |= (1 << (g - 1))
+ }
+
+ if err := syscall.Bind(fd, &s.lsa); err != nil {
+ syscall.Close(fd)
+ return nil, err
+ }
+
+ return s, nil
+}
+
+func (s *NetlinkSocket) Close() {
+ syscall.Close(s.fd)
+}
+
+func (s *NetlinkSocket) Send(request *NetlinkRequest) error {
+ if err := syscall.Sendto(s.fd, request.Serialize(), 0, &s.lsa); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (s *NetlinkSocket) Recieve() ([]syscall.NetlinkMessage, error) {
+ rb := make([]byte, syscall.Getpagesize())
+ nr, _, err := syscall.Recvfrom(s.fd, rb, 0)
+ if err != nil {
+ return nil, err
+ }
+ if nr < syscall.NLMSG_HDRLEN {
+ return nil, fmt.Errorf("Got short response from netlink")
+ }
+ rb = rb[:nr]
+ return syscall.ParseNetlinkMessage(rb)
+}
+
+func (s *NetlinkSocket) GetPid() (uint32, error) {
+ lsa, err := syscall.Getsockname(s.fd)
+ if err != nil {
+ return 0, err
+ }
+ switch v := lsa.(type) {
+ case *syscall.SockaddrNetlink:
+ return v.Pid, nil
+ }
+ return 0, fmt.Errorf("Wrong socket type")
+}
+
+func ZeroTerminated(s string) []byte {
+ bytes := make([]byte, len(s)+1)
+ for i := 0; i < len(s); i++ {
+ bytes[i] = s[i]
+ }
+ bytes[len(s)] = 0
+ return bytes
+}
+
+func NonZeroTerminated(s string) []byte {
+ bytes := make([]byte, len(s))
+ for i := 0; i < len(s); i++ {
+ bytes[i] = s[i]
+ }
+ return bytes
+}
+
+func BytesToString(b []byte) string {
+	if n := bytes.IndexByte(b, 0); n != -1 { b = b[:n] } // tolerate a missing NUL terminator
+	return string(b)
+}
+
+func Uint8Attr(v uint8) []byte {
+ return []byte{byte(v)}
+}
+
+func Uint16Attr(v uint16) []byte {
+ native := NativeEndian()
+ bytes := make([]byte, 2)
+ native.PutUint16(bytes, v)
+ return bytes
+}
+
+func Uint32Attr(v uint32) []byte {
+ native := NativeEndian()
+ bytes := make([]byte, 4)
+ native.PutUint32(bytes, v)
+ return bytes
+}
+
+func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) {
+ var attrs []syscall.NetlinkRouteAttr
+ for len(b) >= syscall.SizeofRtAttr {
+ a, vbuf, alen, err := netlinkRouteAttrAndValue(b)
+ if err != nil {
+ return nil, err
+ }
+ ra := syscall.NetlinkRouteAttr{Attr: *a, Value: vbuf[:int(a.Len)-syscall.SizeofRtAttr]}
+ attrs = append(attrs, ra)
+ b = b[alen:]
+ }
+ return attrs, nil
+}
+
+func netlinkRouteAttrAndValue(b []byte) (*syscall.RtAttr, []byte, int, error) {
+ a := (*syscall.RtAttr)(unsafe.Pointer(&b[0]))
+ if int(a.Len) < syscall.SizeofRtAttr || int(a.Len) > len(b) {
+ return nil, nil, 0, syscall.EINVAL
+ }
+ return a, b[syscall.SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/nl_linux_test.go b/vendor/src/github.com/vishvananda/netlink/nl/nl_linux_test.go
new file mode 100644
index 0000000000..4672684c7a
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/nl_linux_test.go
@@ -0,0 +1,60 @@
+package nl
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/binary"
+ "reflect"
+ "syscall"
+ "testing"
+)
+
+type testSerializer interface {
+ serializeSafe() []byte
+ Serialize() []byte
+}
+
+func testDeserializeSerialize(t *testing.T, orig []byte, safemsg testSerializer, msg testSerializer) {
+ if !reflect.DeepEqual(safemsg, msg) {
+ t.Fatal("Deserialization failed.\n", safemsg, "\n", msg)
+ }
+ safe := msg.serializeSafe()
+ if !bytes.Equal(safe, orig) {
+ t.Fatal("Safe serialization failed.\n", safe, "\n", orig)
+ }
+ b := msg.Serialize()
+ if !bytes.Equal(b, safe) {
+ t.Fatal("Serialization failed.\n", b, "\n", safe)
+ }
+}
+
+func (msg *IfInfomsg) write(b []byte) {
+ native := NativeEndian()
+ b[0] = msg.Family
+ b[1] = msg.X__ifi_pad
+ native.PutUint16(b[2:4], msg.Type)
+ native.PutUint32(b[4:8], uint32(msg.Index))
+ native.PutUint32(b[8:12], msg.Flags)
+ native.PutUint32(b[12:16], msg.Change)
+}
+
+func (msg *IfInfomsg) serializeSafe() []byte {
+ length := syscall.SizeofIfInfomsg
+ b := make([]byte, length)
+ msg.write(b)
+ return b
+}
+
+func deserializeIfInfomsgSafe(b []byte) *IfInfomsg {
+ var msg = IfInfomsg{}
+ binary.Read(bytes.NewReader(b[0:syscall.SizeofIfInfomsg]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestIfInfomsgDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, syscall.SizeofIfInfomsg)
+ rand.Read(orig)
+ safemsg := deserializeIfInfomsgSafe(orig)
+ msg := DeserializeIfInfomsg(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/route_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/route_linux.go
new file mode 100644
index 0000000000..5dde998e96
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/route_linux.go
@@ -0,0 +1,33 @@
+package nl
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type RtMsg struct {
+ syscall.RtMsg
+}
+
+func NewRtMsg() *RtMsg {
+ return &RtMsg{
+ RtMsg: syscall.RtMsg{
+ Table: syscall.RT_TABLE_MAIN,
+ Scope: syscall.RT_SCOPE_UNIVERSE,
+ Protocol: syscall.RTPROT_BOOT,
+ Type: syscall.RTN_UNICAST,
+ },
+ }
+}
+
+func (msg *RtMsg) Len() int {
+ return syscall.SizeofRtMsg
+}
+
+func DeserializeRtMsg(b []byte) *RtMsg {
+ return (*RtMsg)(unsafe.Pointer(&b[0:syscall.SizeofRtMsg][0]))
+}
+
+func (msg *RtMsg) Serialize() []byte {
+ return (*(*[syscall.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/route_linux_test.go b/vendor/src/github.com/vishvananda/netlink/nl/route_linux_test.go
new file mode 100644
index 0000000000..ba9c410ee1
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/route_linux_test.go
@@ -0,0 +1,43 @@
+package nl
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/binary"
+ "syscall"
+ "testing"
+)
+
+func (msg *RtMsg) write(b []byte) {
+ native := NativeEndian()
+ b[0] = msg.Family
+ b[1] = msg.Dst_len
+ b[2] = msg.Src_len
+ b[3] = msg.Tos
+ b[4] = msg.Table
+ b[5] = msg.Protocol
+ b[6] = msg.Scope
+ b[7] = msg.Type
+ native.PutUint32(b[8:12], msg.Flags)
+}
+
+func (msg *RtMsg) serializeSafe() []byte {
+ len := syscall.SizeofRtMsg
+ b := make([]byte, len)
+ msg.write(b)
+ return b
+}
+
+func deserializeRtMsgSafe(b []byte) *RtMsg {
+ var msg = RtMsg{}
+ binary.Read(bytes.NewReader(b[0:syscall.SizeofRtMsg]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestRtMsgDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, syscall.SizeofRtMsg)
+ rand.Read(orig)
+ safemsg := deserializeRtMsgSafe(orig)
+ msg := DeserializeRtMsg(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux.go
new file mode 100644
index 0000000000..d953130870
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux.go
@@ -0,0 +1,259 @@
+package nl
+
+import (
+ "bytes"
+ "net"
+ "unsafe"
+)
+
+// Infinity for packet and byte counts
+const (
+ XFRM_INF = ^uint64(0)
+)
+
+// Message Types
+const (
+ XFRM_MSG_BASE = 0x10
+ XFRM_MSG_NEWSA = 0x10
+ XFRM_MSG_DELSA = 0x11
+ XFRM_MSG_GETSA = 0x12
+ XFRM_MSG_NEWPOLICY = 0x13
+ XFRM_MSG_DELPOLICY = 0x14
+ XFRM_MSG_GETPOLICY = 0x15
+ XFRM_MSG_ALLOCSPI = 0x16
+ XFRM_MSG_ACQUIRE = 0x17
+ XFRM_MSG_EXPIRE = 0x18
+ XFRM_MSG_UPDPOLICY = 0x19
+ XFRM_MSG_UPDSA = 0x1a
+ XFRM_MSG_POLEXPIRE = 0x1b
+ XFRM_MSG_FLUSHSA = 0x1c
+ XFRM_MSG_FLUSHPOLICY = 0x1d
+ XFRM_MSG_NEWAE = 0x1e
+ XFRM_MSG_GETAE = 0x1f
+ XFRM_MSG_REPORT = 0x20
+ XFRM_MSG_MIGRATE = 0x21
+ XFRM_MSG_NEWSADINFO = 0x22
+ XFRM_MSG_GETSADINFO = 0x23
+ XFRM_MSG_NEWSPDINFO = 0x24
+ XFRM_MSG_GETSPDINFO = 0x25
+ XFRM_MSG_MAPPING = 0x26
+ XFRM_MSG_MAX = 0x26
+ XFRM_NR_MSGTYPES = 0x17
+)
+
+// Attribute types
+const (
+ /* Netlink message attributes. */
+ XFRMA_UNSPEC = 0x00
+ XFRMA_ALG_AUTH = 0x01 /* struct xfrm_algo */
+ XFRMA_ALG_CRYPT = 0x02 /* struct xfrm_algo */
+ XFRMA_ALG_COMP = 0x03 /* struct xfrm_algo */
+ XFRMA_ENCAP = 0x04 /* struct xfrm_algo + struct xfrm_encap_tmpl */
+ XFRMA_TMPL = 0x05 /* 1 or more struct xfrm_user_tmpl */
+ XFRMA_SA = 0x06 /* struct xfrm_usersa_info */
+ XFRMA_POLICY = 0x07 /* struct xfrm_userpolicy_info */
+ XFRMA_SEC_CTX = 0x08 /* struct xfrm_sec_ctx */
+ XFRMA_LTIME_VAL = 0x09
+ XFRMA_REPLAY_VAL = 0x0a
+ XFRMA_REPLAY_THRESH = 0x0b
+ XFRMA_ETIMER_THRESH = 0x0c
+ XFRMA_SRCADDR = 0x0d /* xfrm_address_t */
+ XFRMA_COADDR = 0x0e /* xfrm_address_t */
+ XFRMA_LASTUSED = 0x0f /* unsigned long */
+ XFRMA_POLICY_TYPE = 0x10 /* struct xfrm_userpolicy_type */
+ XFRMA_MIGRATE = 0x11
+ XFRMA_ALG_AEAD = 0x12 /* struct xfrm_algo_aead */
+ XFRMA_KMADDRESS = 0x13 /* struct xfrm_user_kmaddress */
+ XFRMA_ALG_AUTH_TRUNC = 0x14 /* struct xfrm_algo_auth */
+ XFRMA_MARK = 0x15 /* struct xfrm_mark */
+ XFRMA_TFCPAD = 0x16 /* __u32 */
+ XFRMA_REPLAY_ESN_VAL = 0x17 /* struct xfrm_replay_esn */
+ XFRMA_SA_EXTRA_FLAGS = 0x18 /* __u32 */
+ XFRMA_MAX = 0x18
+)
+
+const (
+ SizeofXfrmAddress = 0x10
+ SizeofXfrmSelector = 0x38
+ SizeofXfrmLifetimeCfg = 0x40
+ SizeofXfrmLifetimeCur = 0x20
+ SizeofXfrmId = 0x18
+)
+
+// typedef union {
+// __be32 a4;
+// __be32 a6[4];
+// } xfrm_address_t;
+
+type XfrmAddress [SizeofXfrmAddress]byte
+
+func (x *XfrmAddress) ToIP() net.IP {
+ var empty = [12]byte{}
+ ip := make(net.IP, net.IPv6len)
+ if bytes.Equal(x[4:16], empty[:]) {
+ ip[10] = 0xff
+ ip[11] = 0xff
+ copy(ip[12:16], x[0:4])
+ } else {
+ copy(ip[:], x[:])
+ }
+ return ip
+}
+
+func (x *XfrmAddress) ToIPNet(prefixlen uint8) *net.IPNet {
+ ip := x.ToIP()
+ if GetIPFamily(ip) == FAMILY_V4 {
+ return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 32)}
+ } else {
+ return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 128)}
+ }
+}
+
+func (x *XfrmAddress) FromIP(ip net.IP) {
+ var empty = [16]byte{}
+ if len(ip) < net.IPv4len {
+ copy(x[4:16], empty[:])
+ } else if GetIPFamily(ip) == FAMILY_V4 {
+ copy(x[0:4], ip.To4()[0:4])
+ copy(x[4:16], empty[:12])
+ } else {
+ copy(x[0:16], ip.To16()[0:16])
+ }
+}
+
+func DeserializeXfrmAddress(b []byte) *XfrmAddress {
+ return (*XfrmAddress)(unsafe.Pointer(&b[0:SizeofXfrmAddress][0]))
+}
+
+func (msg *XfrmAddress) Serialize() []byte {
+ return (*(*[SizeofXfrmAddress]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_selector {
+// xfrm_address_t daddr;
+// xfrm_address_t saddr;
+// __be16 dport;
+// __be16 dport_mask;
+// __be16 sport;
+// __be16 sport_mask;
+// __u16 family;
+// __u8 prefixlen_d;
+// __u8 prefixlen_s;
+// __u8 proto;
+// int ifindex;
+// __kernel_uid32_t user;
+// };
+
+type XfrmSelector struct {
+ Daddr XfrmAddress
+ Saddr XfrmAddress
+ Dport uint16 // big endian
+ DportMask uint16 // big endian
+ Sport uint16 // big endian
+ SportMask uint16 // big endian
+ Family uint16
+ PrefixlenD uint8
+ PrefixlenS uint8
+ Proto uint8
+ Pad [3]byte
+ Ifindex int32
+ User uint32
+}
+
+func (msg *XfrmSelector) Len() int {
+ return SizeofXfrmSelector
+}
+
+func DeserializeXfrmSelector(b []byte) *XfrmSelector {
+ return (*XfrmSelector)(unsafe.Pointer(&b[0:SizeofXfrmSelector][0]))
+}
+
+func (msg *XfrmSelector) Serialize() []byte {
+ return (*(*[SizeofXfrmSelector]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_lifetime_cfg {
+// __u64 soft_byte_limit;
+// __u64 hard_byte_limit;
+// __u64 soft_packet_limit;
+// __u64 hard_packet_limit;
+// __u64 soft_add_expires_seconds;
+// __u64 hard_add_expires_seconds;
+// __u64 soft_use_expires_seconds;
+// __u64 hard_use_expires_seconds;
+// };
+//
+
+type XfrmLifetimeCfg struct {
+ SoftByteLimit uint64
+ HardByteLimit uint64
+ SoftPacketLimit uint64
+ HardPacketLimit uint64
+ SoftAddExpiresSeconds uint64
+ HardAddExpiresSeconds uint64
+ SoftUseExpiresSeconds uint64
+ HardUseExpiresSeconds uint64
+}
+
+func (msg *XfrmLifetimeCfg) Len() int {
+ return SizeofXfrmLifetimeCfg
+}
+
+func DeserializeXfrmLifetimeCfg(b []byte) *XfrmLifetimeCfg {
+ return (*XfrmLifetimeCfg)(unsafe.Pointer(&b[0:SizeofXfrmLifetimeCfg][0]))
+}
+
+func (msg *XfrmLifetimeCfg) Serialize() []byte {
+ return (*(*[SizeofXfrmLifetimeCfg]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_lifetime_cur {
+// __u64 bytes;
+// __u64 packets;
+// __u64 add_time;
+// __u64 use_time;
+// };
+
+type XfrmLifetimeCur struct {
+ Bytes uint64
+ Packets uint64
+ AddTime uint64
+ UseTime uint64
+}
+
+func (msg *XfrmLifetimeCur) Len() int {
+ return SizeofXfrmLifetimeCur
+}
+
+func DeserializeXfrmLifetimeCur(b []byte) *XfrmLifetimeCur {
+ return (*XfrmLifetimeCur)(unsafe.Pointer(&b[0:SizeofXfrmLifetimeCur][0]))
+}
+
+func (msg *XfrmLifetimeCur) Serialize() []byte {
+ return (*(*[SizeofXfrmLifetimeCur]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_id {
+// xfrm_address_t daddr;
+// __be32 spi;
+// __u8 proto;
+// };
+
+type XfrmId struct {
+ Daddr XfrmAddress
+ Spi uint32 // big endian
+ Proto uint8
+ Pad [3]byte
+}
+
+func (msg *XfrmId) Len() int {
+ return SizeofXfrmId
+}
+
+func DeserializeXfrmId(b []byte) *XfrmId {
+ return (*XfrmId)(unsafe.Pointer(&b[0:SizeofXfrmId][0]))
+}
+
+func (msg *XfrmId) Serialize() []byte {
+ return (*(*[SizeofXfrmId]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux_test.go b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux_test.go
new file mode 100644
index 0000000000..04404d7511
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux_test.go
@@ -0,0 +1,161 @@
+package nl
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/binary"
+ "testing"
+)
+
+func (msg *XfrmAddress) write(b []byte) {
+ copy(b[0:SizeofXfrmAddress], msg[:])
+}
+
+func (msg *XfrmAddress) serializeSafe() []byte {
+ b := make([]byte, SizeofXfrmAddress)
+ msg.write(b)
+ return b
+}
+
+func deserializeXfrmAddressSafe(b []byte) *XfrmAddress {
+ var msg = XfrmAddress{}
+ binary.Read(bytes.NewReader(b[0:SizeofXfrmAddress]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestXfrmAddressDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, SizeofXfrmAddress)
+ rand.Read(orig)
+ safemsg := deserializeXfrmAddressSafe(orig)
+ msg := DeserializeXfrmAddress(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmSelector) write(b []byte) {
+ const AddrEnd = SizeofXfrmAddress * 2
+ native := NativeEndian()
+ msg.Daddr.write(b[0:SizeofXfrmAddress])
+ msg.Saddr.write(b[SizeofXfrmAddress:AddrEnd])
+ native.PutUint16(b[AddrEnd:AddrEnd+2], msg.Dport)
+ native.PutUint16(b[AddrEnd+2:AddrEnd+4], msg.DportMask)
+ native.PutUint16(b[AddrEnd+4:AddrEnd+6], msg.Sport)
+ native.PutUint16(b[AddrEnd+6:AddrEnd+8], msg.SportMask)
+ native.PutUint16(b[AddrEnd+8:AddrEnd+10], msg.Family)
+ b[AddrEnd+10] = msg.PrefixlenD
+ b[AddrEnd+11] = msg.PrefixlenS
+ b[AddrEnd+12] = msg.Proto
+ copy(b[AddrEnd+13:AddrEnd+16], msg.Pad[:])
+ native.PutUint32(b[AddrEnd+16:AddrEnd+20], uint32(msg.Ifindex))
+ native.PutUint32(b[AddrEnd+20:AddrEnd+24], msg.User)
+}
+
+func (msg *XfrmSelector) serializeSafe() []byte {
+ length := SizeofXfrmSelector
+ b := make([]byte, length)
+ msg.write(b)
+ return b
+}
+
+func deserializeXfrmSelectorSafe(b []byte) *XfrmSelector {
+ var msg = XfrmSelector{}
+ binary.Read(bytes.NewReader(b[0:SizeofXfrmSelector]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestXfrmSelectorDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, SizeofXfrmSelector)
+ rand.Read(orig)
+ safemsg := deserializeXfrmSelectorSafe(orig)
+ msg := DeserializeXfrmSelector(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmLifetimeCfg) write(b []byte) {
+ native := NativeEndian()
+ native.PutUint64(b[0:8], msg.SoftByteLimit)
+ native.PutUint64(b[8:16], msg.HardByteLimit)
+ native.PutUint64(b[16:24], msg.SoftPacketLimit)
+ native.PutUint64(b[24:32], msg.HardPacketLimit)
+ native.PutUint64(b[32:40], msg.SoftAddExpiresSeconds)
+ native.PutUint64(b[40:48], msg.HardAddExpiresSeconds)
+ native.PutUint64(b[48:56], msg.SoftUseExpiresSeconds)
+ native.PutUint64(b[56:64], msg.HardUseExpiresSeconds)
+}
+
+func (msg *XfrmLifetimeCfg) serializeSafe() []byte {
+ length := SizeofXfrmLifetimeCfg
+ b := make([]byte, length)
+ msg.write(b)
+ return b
+}
+
+func deserializeXfrmLifetimeCfgSafe(b []byte) *XfrmLifetimeCfg {
+ var msg = XfrmLifetimeCfg{}
+ binary.Read(bytes.NewReader(b[0:SizeofXfrmLifetimeCfg]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestXfrmLifetimeCfgDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, SizeofXfrmLifetimeCfg)
+ rand.Read(orig)
+ safemsg := deserializeXfrmLifetimeCfgSafe(orig)
+ msg := DeserializeXfrmLifetimeCfg(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmLifetimeCur) write(b []byte) {
+ native := NativeEndian()
+ native.PutUint64(b[0:8], msg.Bytes)
+ native.PutUint64(b[8:16], msg.Packets)
+ native.PutUint64(b[16:24], msg.AddTime)
+ native.PutUint64(b[24:32], msg.UseTime)
+}
+
+func (msg *XfrmLifetimeCur) serializeSafe() []byte {
+ length := SizeofXfrmLifetimeCur
+ b := make([]byte, length)
+ msg.write(b)
+ return b
+}
+
+func deserializeXfrmLifetimeCurSafe(b []byte) *XfrmLifetimeCur {
+ var msg = XfrmLifetimeCur{}
+ binary.Read(bytes.NewReader(b[0:SizeofXfrmLifetimeCur]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestXfrmLifetimeCurDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, SizeofXfrmLifetimeCur)
+ rand.Read(orig)
+ safemsg := deserializeXfrmLifetimeCurSafe(orig)
+ msg := DeserializeXfrmLifetimeCur(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmId) write(b []byte) {
+ native := NativeEndian()
+ msg.Daddr.write(b[0:SizeofXfrmAddress])
+ native.PutUint32(b[SizeofXfrmAddress:SizeofXfrmAddress+4], msg.Spi)
+ b[SizeofXfrmAddress+4] = msg.Proto
+ copy(b[SizeofXfrmAddress+5:SizeofXfrmAddress+8], msg.Pad[:])
+}
+
+func (msg *XfrmId) serializeSafe() []byte {
+ b := make([]byte, SizeofXfrmId)
+ msg.write(b)
+ return b
+}
+
+func deserializeXfrmIdSafe(b []byte) *XfrmId {
+ var msg = XfrmId{}
+ binary.Read(bytes.NewReader(b[0:SizeofXfrmId]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestXfrmIdDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, SizeofXfrmId)
+ rand.Read(orig)
+ safemsg := deserializeXfrmIdSafe(orig)
+ msg := DeserializeXfrmId(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go
new file mode 100644
index 0000000000..66f7e03d2d
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go
@@ -0,0 +1,119 @@
+package nl
+
+import (
+ "unsafe"
+)
+
+const (
+ SizeofXfrmUserpolicyId = 0x40
+ SizeofXfrmUserpolicyInfo = 0xa8
+ SizeofXfrmUserTmpl = 0x40
+)
+
+// struct xfrm_userpolicy_id {
+// struct xfrm_selector sel;
+// __u32 index;
+// __u8 dir;
+// };
+//
+
+type XfrmUserpolicyId struct {
+ Sel XfrmSelector
+ Index uint32
+ Dir uint8
+ Pad [3]byte
+}
+
+func (msg *XfrmUserpolicyId) Len() int {
+ return SizeofXfrmUserpolicyId
+}
+
+func DeserializeXfrmUserpolicyId(b []byte) *XfrmUserpolicyId {
+ return (*XfrmUserpolicyId)(unsafe.Pointer(&b[0:SizeofXfrmUserpolicyId][0]))
+}
+
+func (msg *XfrmUserpolicyId) Serialize() []byte {
+ return (*(*[SizeofXfrmUserpolicyId]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_userpolicy_info {
+// struct xfrm_selector sel;
+// struct xfrm_lifetime_cfg lft;
+// struct xfrm_lifetime_cur curlft;
+// __u32 priority;
+// __u32 index;
+// __u8 dir;
+// __u8 action;
+// #define XFRM_POLICY_ALLOW 0
+// #define XFRM_POLICY_BLOCK 1
+// __u8 flags;
+// #define XFRM_POLICY_LOCALOK 1 /* Allow user to override global policy */
+// /* Automatically expand selector to include matching ICMP payloads. */
+// #define XFRM_POLICY_ICMP 2
+// __u8 share;
+// };
+
+type XfrmUserpolicyInfo struct {
+ Sel XfrmSelector
+ Lft XfrmLifetimeCfg
+ Curlft XfrmLifetimeCur
+ Priority uint32
+ Index uint32
+ Dir uint8
+ Action uint8
+ Flags uint8
+ Share uint8
+ Pad [4]byte
+}
+
+func (msg *XfrmUserpolicyInfo) Len() int {
+ return SizeofXfrmUserpolicyInfo
+}
+
+func DeserializeXfrmUserpolicyInfo(b []byte) *XfrmUserpolicyInfo {
+ return (*XfrmUserpolicyInfo)(unsafe.Pointer(&b[0:SizeofXfrmUserpolicyInfo][0]))
+}
+
+func (msg *XfrmUserpolicyInfo) Serialize() []byte {
+ return (*(*[SizeofXfrmUserpolicyInfo]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_user_tmpl {
+// struct xfrm_id id;
+// __u16 family;
+// xfrm_address_t saddr;
+// __u32 reqid;
+// __u8 mode;
+// __u8 share;
+// __u8 optional;
+// __u32 aalgos;
+// __u32 ealgos;
+// __u32 calgos;
+// }
+
+type XfrmUserTmpl struct {
+ XfrmId XfrmId
+ Family uint16
+ Pad1 [2]byte
+ Saddr XfrmAddress
+ Reqid uint32
+ Mode uint8
+ Share uint8
+ Optional uint8
+ Pad2 byte
+ Aalgos uint32
+ Ealgos uint32
+ Calgos uint32
+}
+
+func (msg *XfrmUserTmpl) Len() int {
+ return SizeofXfrmUserTmpl
+}
+
+func DeserializeXfrmUserTmpl(b []byte) *XfrmUserTmpl {
+ return (*XfrmUserTmpl)(unsafe.Pointer(&b[0:SizeofXfrmUserTmpl][0]))
+}
+
+func (msg *XfrmUserTmpl) Serialize() []byte {
+ return (*(*[SizeofXfrmUserTmpl]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux_test.go b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux_test.go
new file mode 100644
index 0000000000..08a604b9cc
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux_test.go
@@ -0,0 +1,109 @@
+package nl
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/binary"
+ "testing"
+)
+
+func (msg *XfrmUserpolicyId) write(b []byte) {
+ native := NativeEndian()
+ msg.Sel.write(b[0:SizeofXfrmSelector])
+ native.PutUint32(b[SizeofXfrmSelector:SizeofXfrmSelector+4], msg.Index)
+ b[SizeofXfrmSelector+4] = msg.Dir
+ copy(b[SizeofXfrmSelector+5:SizeofXfrmSelector+8], msg.Pad[:])
+}
+
+func (msg *XfrmUserpolicyId) serializeSafe() []byte {
+ b := make([]byte, SizeofXfrmUserpolicyId)
+ msg.write(b)
+ return b
+}
+
+func deserializeXfrmUserpolicyIdSafe(b []byte) *XfrmUserpolicyId {
+ var msg = XfrmUserpolicyId{}
+ binary.Read(bytes.NewReader(b[0:SizeofXfrmUserpolicyId]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestXfrmUserpolicyIdDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, SizeofXfrmUserpolicyId)
+ rand.Read(orig)
+ safemsg := deserializeXfrmUserpolicyIdSafe(orig)
+ msg := DeserializeXfrmUserpolicyId(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmUserpolicyInfo) write(b []byte) {
+ const CfgEnd = SizeofXfrmSelector + SizeofXfrmLifetimeCfg
+ const CurEnd = CfgEnd + SizeofXfrmLifetimeCur
+ native := NativeEndian()
+ msg.Sel.write(b[0:SizeofXfrmSelector])
+ msg.Lft.write(b[SizeofXfrmSelector:CfgEnd])
+ msg.Curlft.write(b[CfgEnd:CurEnd])
+ native.PutUint32(b[CurEnd:CurEnd+4], msg.Priority)
+ native.PutUint32(b[CurEnd+4:CurEnd+8], msg.Index)
+ b[CurEnd+8] = msg.Dir
+ b[CurEnd+9] = msg.Action
+ b[CurEnd+10] = msg.Flags
+ b[CurEnd+11] = msg.Share
+ copy(b[CurEnd+12:CurEnd+16], msg.Pad[:])
+}
+
+func (msg *XfrmUserpolicyInfo) serializeSafe() []byte {
+ b := make([]byte, SizeofXfrmUserpolicyInfo)
+ msg.write(b)
+ return b
+}
+
+func deserializeXfrmUserpolicyInfoSafe(b []byte) *XfrmUserpolicyInfo {
+ var msg = XfrmUserpolicyInfo{}
+ binary.Read(bytes.NewReader(b[0:SizeofXfrmUserpolicyInfo]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestXfrmUserpolicyInfoDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, SizeofXfrmUserpolicyInfo)
+ rand.Read(orig)
+ safemsg := deserializeXfrmUserpolicyInfoSafe(orig)
+ msg := DeserializeXfrmUserpolicyInfo(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmUserTmpl) write(b []byte) {
+ const AddrEnd = SizeofXfrmId + 4 + SizeofXfrmAddress
+ native := NativeEndian()
+ msg.XfrmId.write(b[0:SizeofXfrmId])
+ native.PutUint16(b[SizeofXfrmId:SizeofXfrmId+2], msg.Family)
+ copy(b[SizeofXfrmId+2:SizeofXfrmId+4], msg.Pad1[:])
+ msg.Saddr.write(b[SizeofXfrmId+4 : AddrEnd])
+ native.PutUint32(b[AddrEnd:AddrEnd+4], msg.Reqid)
+ b[AddrEnd+4] = msg.Mode
+ b[AddrEnd+5] = msg.Share
+ b[AddrEnd+6] = msg.Optional
+ b[AddrEnd+7] = msg.Pad2
+ native.PutUint32(b[AddrEnd+8:AddrEnd+12], msg.Aalgos)
+ native.PutUint32(b[AddrEnd+12:AddrEnd+16], msg.Ealgos)
+ native.PutUint32(b[AddrEnd+16:AddrEnd+20], msg.Calgos)
+}
+
+func (msg *XfrmUserTmpl) serializeSafe() []byte {
+ b := make([]byte, SizeofXfrmUserTmpl)
+ msg.write(b)
+ return b
+}
+
+func deserializeXfrmUserTmplSafe(b []byte) *XfrmUserTmpl {
+ var msg = XfrmUserTmpl{}
+ binary.Read(bytes.NewReader(b[0:SizeofXfrmUserTmpl]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestXfrmUserTmplDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, SizeofXfrmUserTmpl)
+ rand.Read(orig)
+ safemsg := deserializeXfrmUserTmplSafe(orig)
+ msg := DeserializeXfrmUserTmpl(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
new file mode 100644
index 0000000000..4876ce4583
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
@@ -0,0 +1,221 @@
+package nl
+
+import (
+ "unsafe"
+)
+
+const (
+ SizeofXfrmUsersaId = 0x18
+ SizeofXfrmStats = 0x0c
+ SizeofXfrmUsersaInfo = 0xe0
+ SizeofXfrmAlgo = 0x44
+ SizeofXfrmAlgoAuth = 0x48
+ SizeofXfrmEncapTmpl = 0x18
+)
+
+// struct xfrm_usersa_id {
+// xfrm_address_t daddr;
+// __be32 spi;
+// __u16 family;
+// __u8 proto;
+// };
+
+type XfrmUsersaId struct {
+ Daddr XfrmAddress
+ Spi uint32 // big endian
+ Family uint16
+ Proto uint8
+ Pad byte
+}
+
+func (msg *XfrmUsersaId) Len() int {
+ return SizeofXfrmUsersaId
+}
+
+func DeserializeXfrmUsersaId(b []byte) *XfrmUsersaId {
+ return (*XfrmUsersaId)(unsafe.Pointer(&b[0:SizeofXfrmUsersaId][0]))
+}
+
+func (msg *XfrmUsersaId) Serialize() []byte {
+ return (*(*[SizeofXfrmUsersaId]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_stats {
+// __u32 replay_window;
+// __u32 replay;
+// __u32 integrity_failed;
+// };
+
+type XfrmStats struct {
+ ReplayWindow uint32
+ Replay uint32
+ IntegrityFailed uint32
+}
+
+func (msg *XfrmStats) Len() int {
+ return SizeofXfrmStats
+}
+
+func DeserializeXfrmStats(b []byte) *XfrmStats {
+ return (*XfrmStats)(unsafe.Pointer(&b[0:SizeofXfrmStats][0]))
+}
+
+func (msg *XfrmStats) Serialize() []byte {
+ return (*(*[SizeofXfrmStats]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_usersa_info {
+// struct xfrm_selector sel;
+// struct xfrm_id id;
+// xfrm_address_t saddr;
+// struct xfrm_lifetime_cfg lft;
+// struct xfrm_lifetime_cur curlft;
+// struct xfrm_stats stats;
+// __u32 seq;
+// __u32 reqid;
+// __u16 family;
+// __u8 mode; /* XFRM_MODE_xxx */
+// __u8 replay_window;
+// __u8 flags;
+// #define XFRM_STATE_NOECN 1
+// #define XFRM_STATE_DECAP_DSCP 2
+// #define XFRM_STATE_NOPMTUDISC 4
+// #define XFRM_STATE_WILDRECV 8
+// #define XFRM_STATE_ICMP 16
+// #define XFRM_STATE_AF_UNSPEC 32
+// #define XFRM_STATE_ALIGN4 64
+// #define XFRM_STATE_ESN 128
+// };
+//
+// #define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1
+//
+
+type XfrmUsersaInfo struct {
+ Sel XfrmSelector
+ Id XfrmId
+ Saddr XfrmAddress
+ Lft XfrmLifetimeCfg
+ Curlft XfrmLifetimeCur
+ Stats XfrmStats
+ Seq uint32
+ Reqid uint32
+ Family uint16
+ Mode uint8
+ ReplayWindow uint8
+ Flags uint8
+ Pad [7]byte
+}
+
+func (msg *XfrmUsersaInfo) Len() int {
+ return SizeofXfrmUsersaInfo
+}
+
+func DeserializeXfrmUsersaInfo(b []byte) *XfrmUsersaInfo {
+ return (*XfrmUsersaInfo)(unsafe.Pointer(&b[0:SizeofXfrmUsersaInfo][0]))
+}
+
+func (msg *XfrmUsersaInfo) Serialize() []byte {
+ return (*(*[SizeofXfrmUsersaInfo]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_algo {
+// char alg_name[64];
+// unsigned int alg_key_len; /* in bits */
+// char alg_key[0];
+// };
+
+type XfrmAlgo struct {
+ AlgName [64]byte
+ AlgKeyLen uint32
+ AlgKey []byte
+}
+
+func (msg *XfrmAlgo) Len() int {
+ return SizeofXfrmAlgo + int(msg.AlgKeyLen/8)
+}
+
+func DeserializeXfrmAlgo(b []byte) *XfrmAlgo {
+ ret := XfrmAlgo{}
+ copy(ret.AlgName[:], b[0:64])
+ ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64]))
+ ret.AlgKey = b[68:ret.Len()]
+ return &ret
+}
+
+func (msg *XfrmAlgo) Serialize() []byte {
+ b := make([]byte, msg.Len())
+ copy(b[0:64], msg.AlgName[:])
+ copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:])
+ copy(b[68:msg.Len()], msg.AlgKey[:])
+ return b
+}
+
+// struct xfrm_algo_auth {
+// char alg_name[64];
+// unsigned int alg_key_len; /* in bits */
+// unsigned int alg_trunc_len; /* in bits */
+// char alg_key[0];
+// };
+
+type XfrmAlgoAuth struct {
+ AlgName [64]byte
+ AlgKeyLen uint32
+ AlgTruncLen uint32
+ AlgKey []byte
+}
+
+func (msg *XfrmAlgoAuth) Len() int {
+ return SizeofXfrmAlgoAuth + int(msg.AlgKeyLen/8)
+}
+
+func DeserializeXfrmAlgoAuth(b []byte) *XfrmAlgoAuth {
+ ret := XfrmAlgoAuth{}
+ copy(ret.AlgName[:], b[0:64])
+ ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64]))
+ ret.AlgTruncLen = *(*uint32)(unsafe.Pointer(&b[68]))
+ ret.AlgKey = b[72:ret.Len()]
+ return &ret
+}
+
+func (msg *XfrmAlgoAuth) Serialize() []byte {
+ b := make([]byte, msg.Len())
+ copy(b[0:64], msg.AlgName[:])
+ copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:])
+ copy(b[68:72], (*(*[4]byte)(unsafe.Pointer(&msg.AlgTruncLen)))[:])
+ copy(b[72:msg.Len()], msg.AlgKey[:])
+ return b
+}
+
+// struct xfrm_algo_aead {
+// char alg_name[64];
+// unsigned int alg_key_len; /* in bits */
+// unsigned int alg_icv_len; /* in bits */
+// char alg_key[0];
+// }
+
+// struct xfrm_encap_tmpl {
+// __u16 encap_type;
+// __be16 encap_sport;
+// __be16 encap_dport;
+// xfrm_address_t encap_oa;
+// };
+
+type XfrmEncapTmpl struct {
+ EncapType uint16
+ EncapSport uint16 // big endian
+ EncapDport uint16 // big endian
+ Pad [2]byte
+ EncapOa XfrmAddress
+}
+
+func (msg *XfrmEncapTmpl) Len() int {
+ return SizeofXfrmEncapTmpl
+}
+
+func DeserializeXfrmEncapTmpl(b []byte) *XfrmEncapTmpl {
+ return (*XfrmEncapTmpl)(unsafe.Pointer(&b[0:SizeofXfrmEncapTmpl][0]))
+}
+
+func (msg *XfrmEncapTmpl) Serialize() []byte {
+ return (*(*[SizeofXfrmEncapTmpl]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux_test.go b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux_test.go
new file mode 100644
index 0000000000..d5281e9a64
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux_test.go
@@ -0,0 +1,207 @@
+package nl
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/binary"
+ "testing"
+)
+
+func (msg *XfrmUsersaId) write(b []byte) {
+ native := NativeEndian()
+ msg.Daddr.write(b[0:SizeofXfrmAddress])
+ native.PutUint32(b[SizeofXfrmAddress:SizeofXfrmAddress+4], msg.Spi)
+ native.PutUint16(b[SizeofXfrmAddress+4:SizeofXfrmAddress+6], msg.Family)
+ b[SizeofXfrmAddress+6] = msg.Proto
+ b[SizeofXfrmAddress+7] = msg.Pad
+}
+
+func (msg *XfrmUsersaId) serializeSafe() []byte {
+ b := make([]byte, SizeofXfrmUsersaId)
+ msg.write(b)
+ return b
+}
+
+func deserializeXfrmUsersaIdSafe(b []byte) *XfrmUsersaId {
+ var msg = XfrmUsersaId{}
+ binary.Read(bytes.NewReader(b[0:SizeofXfrmUsersaId]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestXfrmUsersaIdDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, SizeofXfrmUsersaId)
+ rand.Read(orig)
+ safemsg := deserializeXfrmUsersaIdSafe(orig)
+ msg := DeserializeXfrmUsersaId(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmStats) write(b []byte) {
+ native := NativeEndian()
+ native.PutUint32(b[0:4], msg.ReplayWindow)
+ native.PutUint32(b[4:8], msg.Replay)
+ native.PutUint32(b[8:12], msg.IntegrityFailed)
+}
+
+func (msg *XfrmStats) serializeSafe() []byte {
+ b := make([]byte, SizeofXfrmStats)
+ msg.write(b)
+ return b
+}
+
+func deserializeXfrmStatsSafe(b []byte) *XfrmStats {
+ var msg = XfrmStats{}
+ binary.Read(bytes.NewReader(b[0:SizeofXfrmStats]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestXfrmStatsDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, SizeofXfrmStats)
+ rand.Read(orig)
+ safemsg := deserializeXfrmStatsSafe(orig)
+ msg := DeserializeXfrmStats(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmUsersaInfo) write(b []byte) {
+ const IdEnd = SizeofXfrmSelector + SizeofXfrmId
+ const AddressEnd = IdEnd + SizeofXfrmAddress
+ const CfgEnd = AddressEnd + SizeofXfrmLifetimeCfg
+ const CurEnd = CfgEnd + SizeofXfrmLifetimeCur
+ const StatsEnd = CurEnd + SizeofXfrmStats
+ native := NativeEndian()
+ msg.Sel.write(b[0:SizeofXfrmSelector])
+ msg.Id.write(b[SizeofXfrmSelector:IdEnd])
+ msg.Saddr.write(b[IdEnd:AddressEnd])
+ msg.Lft.write(b[AddressEnd:CfgEnd])
+ msg.Curlft.write(b[CfgEnd:CurEnd])
+ msg.Stats.write(b[CurEnd:StatsEnd])
+ native.PutUint32(b[StatsEnd:StatsEnd+4], msg.Seq)
+ native.PutUint32(b[StatsEnd+4:StatsEnd+8], msg.Reqid)
+ native.PutUint16(b[StatsEnd+8:StatsEnd+10], msg.Family)
+ b[StatsEnd+10] = msg.Mode
+ b[StatsEnd+11] = msg.ReplayWindow
+ b[StatsEnd+12] = msg.Flags
+ copy(b[StatsEnd+13:StatsEnd+20], msg.Pad[:])
+}
+
+func (msg *XfrmUsersaInfo) serializeSafe() []byte {
+ b := make([]byte, SizeofXfrmUsersaInfo)
+ msg.write(b)
+ return b
+}
+
+func deserializeXfrmUsersaInfoSafe(b []byte) *XfrmUsersaInfo {
+ var msg = XfrmUsersaInfo{}
+ binary.Read(bytes.NewReader(b[0:SizeofXfrmUsersaInfo]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestXfrmUsersaInfoDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, SizeofXfrmUsersaInfo)
+ rand.Read(orig)
+ safemsg := deserializeXfrmUsersaInfoSafe(orig)
+ msg := DeserializeXfrmUsersaInfo(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmAlgo) write(b []byte) {
+ native := NativeEndian()
+ copy(b[0:64], msg.AlgName[:])
+ native.PutUint32(b[64:68], msg.AlgKeyLen)
+ copy(b[68:msg.Len()], msg.AlgKey[:])
+}
+
+func (msg *XfrmAlgo) serializeSafe() []byte {
+ b := make([]byte, msg.Len())
+ msg.write(b)
+ return b
+}
+
+func deserializeXfrmAlgoSafe(b []byte) *XfrmAlgo {
+ var msg = XfrmAlgo{}
+ copy(msg.AlgName[:], b[0:64])
+ binary.Read(bytes.NewReader(b[64:68]), NativeEndian(), &msg.AlgKeyLen)
+ msg.AlgKey = b[68:msg.Len()]
+ return &msg
+}
+
+func TestXfrmAlgoDeserializeSerialize(t *testing.T) {
+ // use a 32 byte key len
+ var orig = make([]byte, SizeofXfrmAlgo+32)
+ rand.Read(orig)
+ // set the key len to 256 bits
+ orig[64] = 0
+ orig[65] = 1
+ orig[66] = 0
+ orig[67] = 0
+ safemsg := deserializeXfrmAlgoSafe(orig)
+ msg := DeserializeXfrmAlgo(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmAlgoAuth) write(b []byte) {
+ native := NativeEndian()
+ copy(b[0:64], msg.AlgName[:])
+ native.PutUint32(b[64:68], msg.AlgKeyLen)
+ native.PutUint32(b[68:72], msg.AlgTruncLen)
+ copy(b[72:msg.Len()], msg.AlgKey[:])
+}
+
+func (msg *XfrmAlgoAuth) serializeSafe() []byte {
+ b := make([]byte, msg.Len())
+ msg.write(b)
+ return b
+}
+
+func deserializeXfrmAlgoAuthSafe(b []byte) *XfrmAlgoAuth {
+ var msg = XfrmAlgoAuth{}
+ copy(msg.AlgName[:], b[0:64])
+ binary.Read(bytes.NewReader(b[64:68]), NativeEndian(), &msg.AlgKeyLen)
+ binary.Read(bytes.NewReader(b[68:72]), NativeEndian(), &msg.AlgTruncLen)
+ msg.AlgKey = b[72:msg.Len()]
+ return &msg
+}
+
+func TestXfrmAlgoAuthDeserializeSerialize(t *testing.T) {
+ // use a 32 byte key len
+ var orig = make([]byte, SizeofXfrmAlgoAuth+32)
+ rand.Read(orig)
+ // set the key len to 256 bits
+ orig[64] = 0
+ orig[65] = 1
+ orig[66] = 0
+ orig[67] = 0
+ safemsg := deserializeXfrmAlgoAuthSafe(orig)
+ msg := DeserializeXfrmAlgoAuth(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmEncapTmpl) write(b []byte) {
+ native := NativeEndian()
+ native.PutUint16(b[0:2], msg.EncapType)
+ native.PutUint16(b[2:4], msg.EncapSport)
+ native.PutUint16(b[4:6], msg.EncapDport)
+ copy(b[6:8], msg.Pad[:])
+	msg.EncapOa.write(b[8 : 8+SizeofXfrmAddress])
+}
+
+func (msg *XfrmEncapTmpl) serializeSafe() []byte {
+ b := make([]byte, SizeofXfrmEncapTmpl)
+ msg.write(b)
+ return b
+}
+
+func deserializeXfrmEncapTmplSafe(b []byte) *XfrmEncapTmpl {
+ var msg = XfrmEncapTmpl{}
+ binary.Read(bytes.NewReader(b[0:SizeofXfrmEncapTmpl]), NativeEndian(), &msg)
+ return &msg
+}
+
+func TestXfrmEncapTmplDeserializeSerialize(t *testing.T) {
+ var orig = make([]byte, SizeofXfrmEncapTmpl)
+ rand.Read(orig)
+ safemsg := deserializeXfrmEncapTmplSafe(orig)
+ msg := DeserializeXfrmEncapTmpl(orig)
+ testDeserializeSerialize(t, orig, safemsg, msg)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/protinfo.go b/vendor/src/github.com/vishvananda/netlink/protinfo.go
new file mode 100644
index 0000000000..79396da7ca
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/protinfo.go
@@ -0,0 +1,53 @@
+package netlink
+
+import (
+ "strings"
+)
+
+// Protinfo represents bridge flags from netlink.
+type Protinfo struct {
+ Hairpin bool
+ Guard bool
+ FastLeave bool
+ RootBlock bool
+ Learning bool
+ Flood bool
+}
+
+// String returns a list of enabled flags
+func (prot *Protinfo) String() string {
+ boolStrings := make([]string, 0)
+ if prot.Hairpin {
+ boolStrings = append(boolStrings, "Hairpin")
+ }
+ if prot.Guard {
+ boolStrings = append(boolStrings, "Guard")
+ }
+ if prot.FastLeave {
+ boolStrings = append(boolStrings, "FastLeave")
+ }
+ if prot.RootBlock {
+ boolStrings = append(boolStrings, "RootBlock")
+ }
+ if prot.Learning {
+ boolStrings = append(boolStrings, "Learning")
+ }
+ if prot.Flood {
+ boolStrings = append(boolStrings, "Flood")
+ }
+ return strings.Join(boolStrings, " ")
+}
+
+func boolToByte(x bool) []byte {
+ if x {
+ return []byte{1}
+ }
+ return []byte{0}
+}
+
+func byteToBool(x byte) bool {
+ if uint8(x) != 0 {
+ return true
+ }
+ return false
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/src/github.com/vishvananda/netlink/protinfo_linux.go
new file mode 100644
index 0000000000..7181eba100
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/protinfo_linux.go
@@ -0,0 +1,60 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+func LinkGetProtinfo(link Link) (Protinfo, error) {
+ base := link.Attrs()
+ ensureIndex(base)
+ var pi Protinfo
+ req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+ msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+ req.AddData(msg)
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ if err != nil {
+ return pi, err
+ }
+
+ for _, m := range msgs {
+ ans := nl.DeserializeIfInfomsg(m)
+ if int(ans.Index) != base.Index {
+ continue
+ }
+ attrs, err := nl.ParseRouteAttr(m[ans.Len():])
+ if err != nil {
+ return pi, err
+ }
+ for _, attr := range attrs {
+ if attr.Attr.Type != syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED {
+ continue
+ }
+ infos, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return pi, err
+ }
+ var pi Protinfo
+ for _, info := range infos {
+ switch info.Attr.Type {
+ case nl.IFLA_BRPORT_MODE:
+ pi.Hairpin = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_GUARD:
+ pi.Guard = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_FAST_LEAVE:
+ pi.FastLeave = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_PROTECT:
+ pi.RootBlock = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_LEARNING:
+ pi.Learning = byteToBool(info.Value[0])
+ case nl.IFLA_BRPORT_UNICAST_FLOOD:
+ pi.Flood = byteToBool(info.Value[0])
+ }
+ }
+ return pi, nil
+ }
+ }
+ return pi, fmt.Errorf("Device with index %d not found", base.Index)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/protinfo_test.go b/vendor/src/github.com/vishvananda/netlink/protinfo_test.go
new file mode 100644
index 0000000000..f94c42b1c7
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/protinfo_test.go
@@ -0,0 +1,98 @@
+package netlink
+
+import "testing"
+
+func TestProtinfo(t *testing.T) {
+ tearDown := setUpNetlinkTest(t)
+ defer tearDown()
+ master := &Bridge{LinkAttrs{Name: "foo"}}
+ if err := LinkAdd(master); err != nil {
+ t.Fatal(err)
+ }
+ iface1 := &Dummy{LinkAttrs{Name: "bar1", MasterIndex: master.Index}}
+ iface2 := &Dummy{LinkAttrs{Name: "bar2", MasterIndex: master.Index}}
+ iface3 := &Dummy{LinkAttrs{Name: "bar3"}}
+
+ if err := LinkAdd(iface1); err != nil {
+ t.Fatal(err)
+ }
+ if err := LinkAdd(iface2); err != nil {
+ t.Fatal(err)
+ }
+ if err := LinkAdd(iface3); err != nil {
+ t.Fatal(err)
+ }
+
+ oldpi1, err := LinkGetProtinfo(iface1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ oldpi2, err := LinkGetProtinfo(iface2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := LinkSetHairpin(iface1, true); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := LinkSetRootBlock(iface1, true); err != nil {
+ t.Fatal(err)
+ }
+
+ pi1, err := LinkGetProtinfo(iface1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !pi1.Hairpin {
+ t.Fatalf("Hairpin mode is not enabled for %s, but should", iface1.Name)
+ }
+ if !pi1.RootBlock {
+ t.Fatalf("RootBlock is not enabled for %s, but should", iface1.Name)
+ }
+ if pi1.Guard != oldpi1.Guard {
+ t.Fatalf("Guard field was changed for %s but shouldn't", iface1.Name)
+ }
+ if pi1.FastLeave != oldpi1.FastLeave {
+ t.Fatalf("FastLeave field was changed for %s but shouldn't", iface1.Name)
+ }
+ if pi1.Learning != oldpi1.Learning {
+ t.Fatalf("Learning field was changed for %s but shouldn't", iface1.Name)
+ }
+ if pi1.Flood != oldpi1.Flood {
+ t.Fatalf("Flood field was changed for %s but shouldn't", iface1.Name)
+ }
+
+ if err := LinkSetGuard(iface2, true); err != nil {
+ t.Fatal(err)
+ }
+ if err := LinkSetLearning(iface2, false); err != nil {
+ t.Fatal(err)
+ }
+ pi2, err := LinkGetProtinfo(iface2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if pi2.Hairpin {
+ t.Fatalf("Hairpin mode is enabled for %s, but shouldn't", iface2.Name)
+ }
+ if !pi2.Guard {
+ t.Fatalf("Guard is not enabled for %s, but should", iface2.Name)
+ }
+ if pi2.Learning {
+ t.Fatalf("Learning is enabled for %s, but shouldn't", iface2.Name)
+ }
+ if pi2.RootBlock != oldpi2.RootBlock {
+ t.Fatalf("RootBlock field was changed for %s but shouldn't", iface2.Name)
+ }
+ if pi2.FastLeave != oldpi2.FastLeave {
+ t.Fatalf("FastLeave field was changed for %s but shouldn't", iface2.Name)
+ }
+ if pi2.Flood != oldpi2.Flood {
+ t.Fatalf("Flood field was changed for %s but shouldn't", iface2.Name)
+ }
+
+ if err := LinkSetHairpin(iface3, true); err == nil || err.Error() != "operation not supported" {
+ t.Fatalf("Set protinfo attrs for link without master is not supported, but err: %s", err)
+ }
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/route.go b/vendor/src/github.com/vishvananda/netlink/route.go
new file mode 100644
index 0000000000..6218546f80
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/route.go
@@ -0,0 +1,35 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "syscall"
+)
+
+// Scope is an enum representing a route scope.
+type Scope uint8
+
+const (
+ SCOPE_UNIVERSE Scope = syscall.RT_SCOPE_UNIVERSE
+ SCOPE_SITE Scope = syscall.RT_SCOPE_SITE
+ SCOPE_LINK Scope = syscall.RT_SCOPE_LINK
+ SCOPE_HOST Scope = syscall.RT_SCOPE_HOST
+ SCOPE_NOWHERE Scope = syscall.RT_SCOPE_NOWHERE
+)
+
+// Route represents a netlink route. A route is associated with a link,
+// has a destination network, an optional source ip, and optional
+// gateway. Advanced route parameters and non-main routing tables are
+// currently not supported.
+type Route struct {
+ LinkIndex int
+ Scope Scope
+ Dst *net.IPNet
+ Src net.IP
+ Gw net.IP
+}
+
+func (r Route) String() string {
+ return fmt.Sprintf("{Ifindex: %d Dst: %s Src: %s Gw: %s}", r.LinkIndex, r.Dst,
+ r.Src, r.Gw)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/route_linux.go b/vendor/src/github.com/vishvananda/netlink/route_linux.go
new file mode 100644
index 0000000000..43872aa417
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/route_linux.go
@@ -0,0 +1,225 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// RtAttr is shared so it is in netlink_linux.go
+
+// RouteAdd will add a route to the system.
+// Equivalent to: `ip route add $route`
+func RouteAdd(route *Route) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+ return routeHandle(route, req)
+}
+
+// RouteDel will delete a route from the system.
+// Equivalent to: `ip route del $route`
+func RouteDel(route *Route) error {
+ req := nl.NewNetlinkRequest(syscall.RTM_DELROUTE, syscall.NLM_F_ACK)
+ return routeHandle(route, req)
+}
+
+func routeHandle(route *Route, req *nl.NetlinkRequest) error {
+ if (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil {
+ return fmt.Errorf("one of Dst.IP, Src, or Gw must not be nil")
+ }
+
+ msg := nl.NewRtMsg()
+ msg.Scope = uint8(route.Scope)
+ family := -1
+ var rtAttrs []*nl.RtAttr
+
+ if route.Dst != nil && route.Dst.IP != nil {
+ dstLen, _ := route.Dst.Mask.Size()
+ msg.Dst_len = uint8(dstLen)
+ dstFamily := nl.GetIPFamily(route.Dst.IP)
+ family = dstFamily
+ var dstData []byte
+ if dstFamily == FAMILY_V4 {
+ dstData = route.Dst.IP.To4()
+ } else {
+ dstData = route.Dst.IP.To16()
+ }
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData))
+ }
+
+ if route.Src != nil {
+ srcFamily := nl.GetIPFamily(route.Src)
+ if family != -1 && family != srcFamily {
+ return fmt.Errorf("source and destination ip are not the same IP family")
+ }
+ family = srcFamily
+ var srcData []byte
+ if srcFamily == FAMILY_V4 {
+ srcData = route.Src.To4()
+ } else {
+ srcData = route.Src.To16()
+ }
+ // The commonly used src ip for routes is actually PREFSRC
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PREFSRC, srcData))
+ }
+
+ if route.Gw != nil {
+ gwFamily := nl.GetIPFamily(route.Gw)
+ if family != -1 && family != gwFamily {
+ return fmt.Errorf("gateway, source, and destination ip are not the same IP family")
+ }
+ family = gwFamily
+ var gwData []byte
+ if gwFamily == FAMILY_V4 {
+ gwData = route.Gw.To4()
+ } else {
+ gwData = route.Gw.To16()
+ }
+ rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_GATEWAY, gwData))
+ }
+
+ msg.Family = uint8(family)
+
+ req.AddData(msg)
+ for _, attr := range rtAttrs {
+ req.AddData(attr)
+ }
+
+ var (
+ b = make([]byte, 4)
+ native = nl.NativeEndian()
+ )
+ native.PutUint32(b, uint32(route.LinkIndex))
+
+ req.AddData(nl.NewRtAttr(syscall.RTA_OIF, b))
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// RouteList gets a list of routes in the system.
+// Equivalent to: `ip route show`.
+// The list can be filtered by link and ip family.
+func RouteList(link Link, family int) ([]Route, error) {
+ req := nl.NewNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP)
+ msg := nl.NewIfInfomsg(family)
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE)
+ if err != nil {
+ return nil, err
+ }
+
+ index := 0
+ if link != nil {
+ base := link.Attrs()
+ ensureIndex(base)
+ index = base.Index
+ }
+
+ native := nl.NativeEndian()
+ res := make([]Route, 0)
+ for _, m := range msgs {
+ msg := nl.DeserializeRtMsg(m)
+
+ if msg.Flags&syscall.RTM_F_CLONED != 0 {
+ // Ignore cloned routes
+ continue
+ }
+
+ if msg.Table != syscall.RT_TABLE_MAIN {
+ // Ignore non-main tables
+ continue
+ }
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ route := Route{Scope: Scope(msg.Scope)}
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case syscall.RTA_GATEWAY:
+ route.Gw = net.IP(attr.Value)
+ case syscall.RTA_PREFSRC:
+ route.Src = net.IP(attr.Value)
+ case syscall.RTA_DST:
+ route.Dst = &net.IPNet{
+ IP: attr.Value,
+ Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attr.Value)),
+ }
+ case syscall.RTA_OIF:
+ routeIndex := int(native.Uint32(attr.Value[0:4]))
+ if link != nil && routeIndex != index {
+ // Ignore routes from other interfaces
+ continue
+ }
+ route.LinkIndex = routeIndex
+ }
+ }
+ res = append(res, route)
+ }
+
+ return res, nil
+}
+
+// RouteGet gets a route to a specific destination from the host system.
+// Equivalent to: 'ip route get'.
+func RouteGet(destination net.IP) ([]Route, error) {
+ req := nl.NewNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_REQUEST)
+ family := nl.GetIPFamily(destination)
+ var destinationData []byte
+ var bitlen uint8
+ if family == FAMILY_V4 {
+ destinationData = destination.To4()
+ bitlen = 32
+ } else {
+ destinationData = destination.To16()
+ bitlen = 128
+ }
+ msg := &nl.RtMsg{}
+ msg.Family = uint8(family)
+ msg.Dst_len = bitlen
+ req.AddData(msg)
+
+ rtaDst := nl.NewRtAttr(syscall.RTA_DST, destinationData)
+ req.AddData(rtaDst)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE)
+ if err != nil {
+ return nil, err
+ }
+
+ native := nl.NativeEndian()
+ res := make([]Route, 0)
+ for _, m := range msgs {
+ msg := nl.DeserializeRtMsg(m)
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ route := Route{}
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case syscall.RTA_GATEWAY:
+ route.Gw = net.IP(attr.Value)
+ case syscall.RTA_PREFSRC:
+ route.Src = net.IP(attr.Value)
+ case syscall.RTA_DST:
+ route.Dst = &net.IPNet{
+ IP: attr.Value,
+ Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attr.Value)),
+ }
+ case syscall.RTA_OIF:
+ routeIndex := int(native.Uint32(attr.Value[0:4]))
+ route.LinkIndex = routeIndex
+ }
+ }
+ res = append(res, route)
+ }
+ return res, nil
+
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/route_test.go b/vendor/src/github.com/vishvananda/netlink/route_test.go
new file mode 100644
index 0000000000..f02bef8c87
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/route_test.go
@@ -0,0 +1,84 @@
+package netlink
+
+import (
+ "net"
+ "testing"
+)
+
+func TestRouteAddDel(t *testing.T) {
+ tearDown := setUpNetlinkTest(t)
+ defer tearDown()
+
+ // get loopback interface
+ link, err := LinkByName("lo")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // bring the interface up
+ if err = LinkSetUp(link); err != nil {
+ t.Fatal(err)
+ }
+
+ // add a gateway route
+ _, dst, err := net.ParseCIDR("192.168.0.0/24")
+
+ ip := net.ParseIP("127.1.1.1")
+ route := Route{LinkIndex: link.Attrs().Index, Dst: dst, Src: ip}
+ err = RouteAdd(&route)
+ if err != nil {
+ t.Fatal(err)
+ }
+ routes, err := RouteList(link, FAMILY_V4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(routes) != 1 {
+ t.Fatal("Link not added properly")
+ }
+
+ dstIP := net.ParseIP("192.168.0.42")
+ routeToDstIP, err := RouteGet(dstIP)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(routeToDstIP) == 0 {
+ t.Fatal("Default route not present")
+ }
+
+ err = RouteDel(&route)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ routes, err = RouteList(link, FAMILY_V4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(routes) != 0 {
+ t.Fatal("Route not removed properly")
+ }
+
+}
+
+func TestRouteAddIncomplete(t *testing.T) {
+ tearDown := setUpNetlinkTest(t)
+ defer tearDown()
+
+ // get loopback interface
+ link, err := LinkByName("lo")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // bring the interface up
+ if err = LinkSetUp(link); err != nil {
+ t.Fatal(err)
+ }
+
+ route := Route{LinkIndex: link.Attrs().Index}
+ if err := RouteAdd(&route); err == nil {
+ t.Fatal("Adding incomplete route should fail")
+ }
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm.go b/vendor/src/github.com/vishvananda/netlink/xfrm.go
new file mode 100644
index 0000000000..621ffb6c68
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm.go
@@ -0,0 +1,64 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+)
+
+// Proto is an enum representing an ipsec protocol.
+type Proto uint8
+
+const (
+ XFRM_PROTO_ROUTE2 Proto = syscall.IPPROTO_ROUTING
+ XFRM_PROTO_ESP Proto = syscall.IPPROTO_ESP
+ XFRM_PROTO_AH Proto = syscall.IPPROTO_AH
+ XFRM_PROTO_HAO Proto = syscall.IPPROTO_DSTOPTS
+ XFRM_PROTO_COMP Proto = syscall.IPPROTO_COMP
+ XFRM_PROTO_IPSEC_ANY Proto = syscall.IPPROTO_RAW
+)
+
+func (p Proto) String() string {
+ switch p {
+ case XFRM_PROTO_ROUTE2:
+ return "route2"
+ case XFRM_PROTO_ESP:
+ return "esp"
+ case XFRM_PROTO_AH:
+ return "ah"
+ case XFRM_PROTO_HAO:
+ return "hao"
+ case XFRM_PROTO_COMP:
+ return "comp"
+ case XFRM_PROTO_IPSEC_ANY:
+ return "ipsec-any"
+ }
+ return fmt.Sprintf("%d", p)
+}
+
+// Mode is an enum representing an ipsec transport.
+type Mode uint8
+
+const (
+ XFRM_MODE_TRANSPORT Mode = iota
+ XFRM_MODE_TUNNEL
+ XFRM_MODE_ROUTEOPTIMIZATION
+ XFRM_MODE_IN_TRIGGER
+ XFRM_MODE_BEET
+ XFRM_MODE_MAX
+)
+
+func (m Mode) String() string {
+ switch m {
+ case XFRM_MODE_TRANSPORT:
+ return "transport"
+ case XFRM_MODE_TUNNEL:
+ return "tunnel"
+ case XFRM_MODE_ROUTEOPTIMIZATION:
+ return "ro"
+ case XFRM_MODE_IN_TRIGGER:
+ return "in_trigger"
+ case XFRM_MODE_BEET:
+ return "beet"
+ }
+ return fmt.Sprintf("%d", m)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_policy.go b/vendor/src/github.com/vishvananda/netlink/xfrm_policy.go
new file mode 100644
index 0000000000..d85c65d2d2
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_policy.go
@@ -0,0 +1,59 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+)
+
+// Dir is an enum representing an ipsec template direction.
+type Dir uint8
+
+const (
+ XFRM_DIR_IN Dir = iota
+ XFRM_DIR_OUT
+ XFRM_DIR_FWD
+ XFRM_SOCKET_IN
+ XFRM_SOCKET_OUT
+ XFRM_SOCKET_FWD
+)
+
+func (d Dir) String() string {
+ switch d {
+ case XFRM_DIR_IN:
+ return "dir in"
+ case XFRM_DIR_OUT:
+ return "dir out"
+ case XFRM_DIR_FWD:
+ return "dir fwd"
+ case XFRM_SOCKET_IN:
+ return "socket in"
+ case XFRM_SOCKET_OUT:
+ return "socket out"
+ case XFRM_SOCKET_FWD:
+ return "socket fwd"
+ }
+ return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN)
+}
+
+// XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec
+// policy. These rules are matched with XfrmState to determine encryption
+// and authentication algorithms.
+type XfrmPolicyTmpl struct {
+ Dst net.IP
+ Src net.IP
+ Proto Proto
+ Mode Mode
+ Reqid int
+}
+
+// XfrmPolicy represents an ipsec policy. It represents the overlay network
+// and has a list of XfrmPolicyTmpls representing the base addresses of
+// the policy.
+type XfrmPolicy struct {
+ Dst *net.IPNet
+ Src *net.IPNet
+ Dir Dir
+ Priority int
+ Index int
+ Tmpls []XfrmPolicyTmpl
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/src/github.com/vishvananda/netlink/xfrm_policy_linux.go
new file mode 100644
index 0000000000..6fe1b63757
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_policy_linux.go
@@ -0,0 +1,127 @@
+package netlink
+
+import (
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) {
+ sel.Family = uint16(nl.GetIPFamily(policy.Dst.IP))
+ sel.Daddr.FromIP(policy.Dst.IP)
+ sel.Saddr.FromIP(policy.Src.IP)
+ prefixlenD, _ := policy.Dst.Mask.Size()
+ sel.PrefixlenD = uint8(prefixlenD)
+ prefixlenS, _ := policy.Src.Mask.Size()
+ sel.PrefixlenS = uint8(prefixlenS)
+}
+
+// XfrmPolicyAdd will add an xfrm policy to the system.
+// Equivalent to: `ip xfrm policy add $policy`
+func XfrmPolicyAdd(policy *XfrmPolicy) error {
+ req := nl.NewNetlinkRequest(nl.XFRM_MSG_NEWPOLICY, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+
+ msg := &nl.XfrmUserpolicyInfo{}
+ selFromPolicy(&msg.Sel, policy)
+ msg.Priority = uint32(policy.Priority)
+ msg.Index = uint32(policy.Index)
+ msg.Dir = uint8(policy.Dir)
+ msg.Lft.SoftByteLimit = nl.XFRM_INF
+ msg.Lft.HardByteLimit = nl.XFRM_INF
+ msg.Lft.SoftPacketLimit = nl.XFRM_INF
+ msg.Lft.HardPacketLimit = nl.XFRM_INF
+ req.AddData(msg)
+
+ tmplData := make([]byte, nl.SizeofXfrmUserTmpl*len(policy.Tmpls))
+ for i, tmpl := range policy.Tmpls {
+ start := i * nl.SizeofXfrmUserTmpl
+ userTmpl := nl.DeserializeXfrmUserTmpl(tmplData[start : start+nl.SizeofXfrmUserTmpl])
+ userTmpl.XfrmId.Daddr.FromIP(tmpl.Dst)
+ userTmpl.Saddr.FromIP(tmpl.Src)
+ userTmpl.XfrmId.Proto = uint8(tmpl.Proto)
+ userTmpl.Mode = uint8(tmpl.Mode)
+ userTmpl.Reqid = uint32(tmpl.Reqid)
+ userTmpl.Aalgos = ^uint32(0)
+ userTmpl.Ealgos = ^uint32(0)
+ userTmpl.Calgos = ^uint32(0)
+ }
+ if len(tmplData) > 0 {
+ tmpls := nl.NewRtAttr(nl.XFRMA_TMPL, tmplData)
+ req.AddData(tmpls)
+ }
+
+ _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ return err
+}
+
+// XfrmPolicyDel will delete an xfrm policy from the system. Note that
+// the Tmpls are ignored when matching the policy to delete.
+// Equivalent to: `ip xfrm policy del $policy`
+func XfrmPolicyDel(policy *XfrmPolicy) error {
+ req := nl.NewNetlinkRequest(nl.XFRM_MSG_DELPOLICY, syscall.NLM_F_ACK)
+
+ msg := &nl.XfrmUserpolicyId{}
+ selFromPolicy(&msg.Sel, policy)
+ msg.Index = uint32(policy.Index)
+ msg.Dir = uint8(policy.Dir)
+ req.AddData(msg)
+
+ _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ return err
+}
+
+// XfrmPolicyList gets a list of xfrm policies in the system.
+// Equivalent to: `ip xfrm policy show`.
+// The list can be filtered by ip family.
+func XfrmPolicyList(family int) ([]XfrmPolicy, error) {
+ req := nl.NewNetlinkRequest(nl.XFRM_MSG_GETPOLICY, syscall.NLM_F_DUMP)
+
+ msg := nl.NewIfInfomsg(family)
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY)
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]XfrmPolicy, 0)
+ for _, m := range msgs {
+ msg := nl.DeserializeXfrmUserpolicyInfo(m)
+
+ if family != FAMILY_ALL && family != int(msg.Sel.Family) {
+ continue
+ }
+
+ var policy XfrmPolicy
+
+ policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD)
+ policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS)
+ policy.Priority = int(msg.Priority)
+ policy.Index = int(msg.Index)
+ policy.Dir = Dir(msg.Dir)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.XFRMA_TMPL:
+ max := len(attr.Value)
+ for i := 0; i < max; i += nl.SizeofXfrmUserTmpl {
+ var resTmpl XfrmPolicyTmpl
+ tmpl := nl.DeserializeXfrmUserTmpl(attr.Value[i : i+nl.SizeofXfrmUserTmpl])
+ resTmpl.Dst = tmpl.XfrmId.Daddr.ToIP()
+ resTmpl.Src = tmpl.Saddr.ToIP()
+ resTmpl.Proto = Proto(tmpl.XfrmId.Proto)
+ resTmpl.Mode = Mode(tmpl.Mode)
+ resTmpl.Reqid = int(tmpl.Reqid)
+ policy.Tmpls = append(policy.Tmpls, resTmpl)
+ }
+ }
+ }
+ res = append(res, policy)
+ }
+ return res, nil
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_policy_test.go b/vendor/src/github.com/vishvananda/netlink/xfrm_policy_test.go
new file mode 100644
index 0000000000..06d178d1f9
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_policy_test.go
@@ -0,0 +1,49 @@
+package netlink
+
+import (
+ "net"
+ "testing"
+)
+
+func TestXfrmPolicyAddDel(t *testing.T) {
+ tearDown := setUpNetlinkTest(t)
+ defer tearDown()
+
+ src, _ := ParseIPNet("127.1.1.1/32")
+ dst, _ := ParseIPNet("127.1.1.2/32")
+ policy := XfrmPolicy{
+ Src: src,
+ Dst: dst,
+ Dir: XFRM_DIR_OUT,
+ }
+ tmpl := XfrmPolicyTmpl{
+ Src: net.ParseIP("127.0.0.1"),
+ Dst: net.ParseIP("127.0.0.2"),
+ Proto: XFRM_PROTO_ESP,
+ Mode: XFRM_MODE_TUNNEL,
+ }
+ policy.Tmpls = append(policy.Tmpls, tmpl)
+ if err := XfrmPolicyAdd(&policy); err != nil {
+ t.Fatal(err)
+ }
+ policies, err := XfrmPolicyList(FAMILY_ALL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(policies) != 1 {
+ t.Fatal("Policy not added properly")
+ }
+
+ if err = XfrmPolicyDel(&policy); err != nil {
+ t.Fatal(err)
+ }
+
+ policies, err = XfrmPolicyList(FAMILY_ALL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(policies) != 0 {
+ t.Fatal("Policy not removed properly")
+ }
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_state.go b/vendor/src/github.com/vishvananda/netlink/xfrm_state.go
new file mode 100644
index 0000000000..5b8f2df708
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_state.go
@@ -0,0 +1,53 @@
+package netlink
+
+import (
+ "net"
+)
+
+// XfrmStateAlgo represents the algorithm to use for the ipsec encryption.
+type XfrmStateAlgo struct {
+ Name string
+ Key []byte
+ TruncateLen int // Auth only
+}
+
+// EncapType is an enum representing an ipsec encapsulation type.
+type EncapType uint8
+
+const (
+ XFRM_ENCAP_ESPINUDP_NONIKE EncapType = iota + 1
+ XFRM_ENCAP_ESPINUDP
+)
+
+func (e EncapType) String() string {
+ switch e {
+ case XFRM_ENCAP_ESPINUDP_NONIKE:
+ return "espinudp-nonike"
+ case XFRM_ENCAP_ESPINUDP:
+ return "espinudp"
+ }
+ return "unknown"
+}
+
+// XfrmStateEncap represents the encapsulation to use for the ipsec encryption.
+type XfrmStateEncap struct {
+ Type EncapType
+ SrcPort int
+ DstPort int
+ OriginalAddress net.IP
+}
+
+// XfrmState represents the state of an ipsec policy. It optionally
+// contains an XfrmStateAlgo for encryption and one for authentication.
+type XfrmState struct {
+ Dst net.IP
+ Src net.IP
+ Proto Proto
+ Mode Mode
+ Spi int
+ Reqid int
+ ReplayWindow int
+ Auth *XfrmStateAlgo
+ Crypt *XfrmStateAlgo
+ Encap *XfrmStateEncap
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/src/github.com/vishvananda/netlink/xfrm_state_linux.go
new file mode 100644
index 0000000000..0f1fbd0e06
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_state_linux.go
@@ -0,0 +1,181 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+func writeStateAlgo(a *XfrmStateAlgo) []byte {
+ algo := nl.XfrmAlgo{
+ AlgKeyLen: uint32(len(a.Key) * 8),
+ AlgKey: a.Key,
+ }
+ end := len(a.Name)
+ if end > 64 {
+ end = 64
+ }
+ copy(algo.AlgName[:end], a.Name)
+ return algo.Serialize()
+}
+
+func writeStateAlgoAuth(a *XfrmStateAlgo) []byte {
+ algo := nl.XfrmAlgoAuth{
+ AlgKeyLen: uint32(len(a.Key) * 8),
+ AlgTruncLen: uint32(a.TruncateLen),
+ AlgKey: a.Key,
+ }
+ end := len(a.Name)
+ if end > 64 {
+ end = 64
+ }
+ copy(algo.AlgName[:end], a.Name)
+ return algo.Serialize()
+}
+
+// XfrmStateAdd will add an xfrm state to the system.
+// Equivalent to: `ip xfrm state add $state`
+func XfrmStateAdd(state *XfrmState) error {
+ // A state with spi 0 can't be deleted so don't allow it to be set
+ if state.Spi == 0 {
+ return fmt.Errorf("Spi must be set when adding xfrm state.")
+ }
+ req := nl.NewNetlinkRequest(nl.XFRM_MSG_NEWSA, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+
+ msg := &nl.XfrmUsersaInfo{}
+ msg.Family = uint16(nl.GetIPFamily(state.Dst))
+ msg.Id.Daddr.FromIP(state.Dst)
+ msg.Saddr.FromIP(state.Src)
+ msg.Id.Proto = uint8(state.Proto)
+ msg.Mode = uint8(state.Mode)
+ msg.Id.Spi = nl.Swap32(uint32(state.Spi))
+ msg.Reqid = uint32(state.Reqid)
+ msg.ReplayWindow = uint8(state.ReplayWindow)
+ msg.Lft.SoftByteLimit = nl.XFRM_INF
+ msg.Lft.HardByteLimit = nl.XFRM_INF
+ msg.Lft.SoftPacketLimit = nl.XFRM_INF
+ msg.Lft.HardPacketLimit = nl.XFRM_INF
+ req.AddData(msg)
+
+ if state.Auth != nil {
+ out := nl.NewRtAttr(nl.XFRMA_ALG_AUTH_TRUNC, writeStateAlgoAuth(state.Auth))
+ req.AddData(out)
+ }
+ if state.Crypt != nil {
+ out := nl.NewRtAttr(nl.XFRMA_ALG_CRYPT, writeStateAlgo(state.Crypt))
+ req.AddData(out)
+ }
+ if state.Encap != nil {
+ encapData := make([]byte, nl.SizeofXfrmEncapTmpl)
+ encap := nl.DeserializeXfrmEncapTmpl(encapData)
+ encap.EncapType = uint16(state.Encap.Type)
+ encap.EncapSport = nl.Swap16(uint16(state.Encap.SrcPort))
+ encap.EncapDport = nl.Swap16(uint16(state.Encap.DstPort))
+ encap.EncapOa.FromIP(state.Encap.OriginalAddress)
+ out := nl.NewRtAttr(nl.XFRMA_ENCAP, encapData)
+ req.AddData(out)
+ }
+
+ _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ return err
+}
+
+// XfrmStateDel will delete an xfrm state from the system. Note that
+// the Algos are ignored when matching the state to delete.
+// Equivalent to: `ip xfrm state del $state`
+func XfrmStateDel(state *XfrmState) error {
+ req := nl.NewNetlinkRequest(nl.XFRM_MSG_DELSA, syscall.NLM_F_ACK)
+
+ msg := &nl.XfrmUsersaId{}
+ msg.Daddr.FromIP(state.Dst)
+ msg.Family = uint16(nl.GetIPFamily(state.Dst))
+ msg.Proto = uint8(state.Proto)
+ msg.Spi = nl.Swap32(uint32(state.Spi))
+ req.AddData(msg)
+
+ saddr := nl.XfrmAddress{}
+ saddr.FromIP(state.Src)
+ srcdata := nl.NewRtAttr(nl.XFRMA_SRCADDR, saddr.Serialize())
+
+ req.AddData(srcdata)
+
+ _, err := req.Execute(syscall.NETLINK_XFRM, 0)
+ return err
+}
+
+// XfrmStateList gets a list of xfrm states in the system.
+// Equivalent to: `ip xfrm state show`.
+// The list can be filtered by ip family.
+func XfrmStateList(family int) ([]XfrmState, error) {
+ req := nl.NewNetlinkRequest(nl.XFRM_MSG_GETSA, syscall.NLM_F_DUMP)
+
+ msg := nl.NewIfInfomsg(family)
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWSA)
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]XfrmState, 0)
+ for _, m := range msgs {
+ msg := nl.DeserializeXfrmUsersaInfo(m)
+
+ if family != FAMILY_ALL && family != int(msg.Family) {
+ continue
+ }
+
+ var state XfrmState
+
+ state.Dst = msg.Id.Daddr.ToIP()
+ state.Src = msg.Saddr.ToIP()
+ state.Proto = Proto(msg.Id.Proto)
+ state.Mode = Mode(msg.Mode)
+ state.Spi = int(nl.Swap32(msg.Id.Spi))
+ state.Reqid = int(msg.Reqid)
+ state.ReplayWindow = int(msg.ReplayWindow)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.XFRMA_ALG_AUTH, nl.XFRMA_ALG_CRYPT:
+ var resAlgo *XfrmStateAlgo
+ if attr.Attr.Type == nl.XFRMA_ALG_AUTH {
+ if state.Auth == nil {
+ state.Auth = new(XfrmStateAlgo)
+ }
+ resAlgo = state.Auth
+ } else {
+ state.Crypt = new(XfrmStateAlgo)
+ resAlgo = state.Crypt
+ }
+ algo := nl.DeserializeXfrmAlgo(attr.Value[:])
+ (*resAlgo).Name = nl.BytesToString(algo.AlgName[:])
+ (*resAlgo).Key = algo.AlgKey
+ case nl.XFRMA_ALG_AUTH_TRUNC:
+ if state.Auth == nil {
+ state.Auth = new(XfrmStateAlgo)
+ }
+ algo := nl.DeserializeXfrmAlgoAuth(attr.Value[:])
+ state.Auth.Name = nl.BytesToString(algo.AlgName[:])
+ state.Auth.Key = algo.AlgKey
+ state.Auth.TruncateLen = int(algo.AlgTruncLen)
+ case nl.XFRMA_ENCAP:
+ encap := nl.DeserializeXfrmEncapTmpl(attr.Value[:])
+ state.Encap = new(XfrmStateEncap)
+ state.Encap.Type = EncapType(encap.EncapType)
+ state.Encap.SrcPort = int(nl.Swap16(encap.EncapSport))
+ state.Encap.DstPort = int(nl.Swap16(encap.EncapDport))
+ state.Encap.OriginalAddress = encap.EncapOa.ToIP()
+ }
+
+ }
+ res = append(res, state)
+ }
+ return res, nil
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_state_test.go b/vendor/src/github.com/vishvananda/netlink/xfrm_state_test.go
new file mode 100644
index 0000000000..df57ef8b7e
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_state_test.go
@@ -0,0 +1,50 @@
+package netlink
+
+import (
+ "net"
+ "testing"
+)
+
+func TestXfrmStateAddDel(t *testing.T) {
+ tearDown := setUpNetlinkTest(t)
+ defer tearDown()
+
+ state := XfrmState{
+ Src: net.ParseIP("127.0.0.1"),
+ Dst: net.ParseIP("127.0.0.2"),
+ Proto: XFRM_PROTO_ESP,
+ Mode: XFRM_MODE_TUNNEL,
+ Spi: 1,
+ Auth: &XfrmStateAlgo{
+ Name: "hmac(sha256)",
+ Key: []byte("abcdefghijklmnopqrstuvwzyzABCDEF"),
+ },
+ Crypt: &XfrmStateAlgo{
+ Name: "cbc(aes)",
+ Key: []byte("abcdefghijklmnopqrstuvwzyzABCDEF"),
+ },
+ }
+ if err := XfrmStateAdd(&state); err != nil {
+ t.Fatal(err)
+ }
+ policies, err := XfrmStateList(FAMILY_ALL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(policies) != 1 {
+ t.Fatal("State not added properly")
+ }
+
+ if err = XfrmStateDel(&state); err != nil {
+ t.Fatal(err)
+ }
+
+ policies, err = XfrmStateList(FAMILY_ALL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(policies) != 0 {
+ t.Fatal("State not removed properly")
+ }
+}
diff --git a/vendor/src/github.com/vishvananda/netns/LICENSE b/vendor/src/github.com/vishvananda/netns/LICENSE
new file mode 100644
index 0000000000..9f64db8582
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/LICENSE
@@ -0,0 +1,192 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Vishvananda Ishaya.
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/github.com/vishvananda/netns/README.md b/vendor/src/github.com/vishvananda/netns/README.md
new file mode 100644
index 0000000000..24a4003ae6
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/README.md
@@ -0,0 +1,49 @@
+# netns - network namespaces in go #
+
+The netns package provides an ultra-simple interface for handling
+network namespaces in go. Changing namespaces requires elevated
+privileges, so in most cases this code needs to be run as root.
+
+## Local Build and Test ##
+
+You can use go get command:
+
+ go get github.com/vishvananda/netns
+
+Testing (requires root):
+
+ sudo -E go test github.com/vishvananda/netns
+
+## Example ##
+
+```go
+package main
+
+import (
+ "net"
+ "runtime"
+	"github.com/vishvananda/netns"
+)
+
+func main() {
+ // Lock the OS Thread so we don't accidentally switch namespaces
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ // Save the current network namespace
+ origns, _ := netns.Get()
+ defer origns.Close()
+
+ // Create a new network namespace
+ newns, _ := netns.New()
+ defer newns.Close()
+
+	// Do something with the network namespace
+ ifaces, _ := net.Interfaces()
+ fmt.Printf("Interfaces: %v\n", ifaces)
+
+ // Switch back to the original namespace
+ netns.Set(origns)
+}
+
+```
diff --git a/vendor/src/github.com/vishvananda/netns/netns.go b/vendor/src/github.com/vishvananda/netns/netns.go
new file mode 100644
index 0000000000..3878da3389
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/netns.go
@@ -0,0 +1,66 @@
+// Package netns allows ultra-simple network namespace handling. NsHandles
+// can be retrieved and set. Note that the current namespace is thread
+// local so actions that set and reset namespaces should use LockOSThread
+// to make sure the namespace doesn't change due to a goroutine switch.
+// It is best to close NsHandles when you are done with them. This can be
+// accomplished via a `defer ns.Close()` on the handle. Changing namespaces
+// requires elevated privileges, so in most cases this code needs to be run
+// as root.
+package netns
+
+import (
+ "fmt"
+ "syscall"
+)
+// NsHandle is a handle to a network namespace. It can be cast directly
+// to an int and used as a file descriptor.
+type NsHandle int
+
+// Equal determines if two network handles refer to the same network
+// namespace. This is done by comparing the device and inode that the
+// file descriptors point to.
+func (ns NsHandle) Equal(other NsHandle) bool {
+ if ns == other {
+ return true
+ }
+ var s1, s2 syscall.Stat_t
+ if err := syscall.Fstat(int(ns), &s1); err != nil {
+ return false
+ }
+ if err := syscall.Fstat(int(other), &s2); err != nil {
+ return false
+ }
+ return (s1.Dev == s2.Dev) && (s1.Ino == s2.Ino)
+}
+
+// String shows the file descriptor number and its dev and inode.
+func (ns NsHandle) String() string {
+ var s syscall.Stat_t
+ if ns == -1 {
+ return "NS(None)"
+ }
+ if err := syscall.Fstat(int(ns), &s); err != nil {
+ return fmt.Sprintf("NS(%d: unknown)", ns)
+ }
+ return fmt.Sprintf("NS(%d: %d, %d)", ns, s.Dev, s.Ino)
+}
+
+// IsOpen returns true if Close() has not been called.
+func (ns NsHandle) IsOpen() bool {
+ return ns != -1
+}
+
+// Close closes the NsHandle and resets its file descriptor to -1.
+// It is not safe to use an NsHandle after Close() is called.
+func (ns *NsHandle) Close() error {
+ if err := syscall.Close(int(*ns)); err != nil {
+ return err
+ }
+ (*ns) = -1
+ return nil
+}
+
+// None gets an empty (closed) NsHandle.
+func None() NsHandle {
+ return NsHandle(-1)
+}
diff --git a/vendor/src/github.com/vishvananda/netns/netns_linux.go b/vendor/src/github.com/vishvananda/netns/netns_linux.go
new file mode 100644
index 0000000000..1cf5e136ec
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/netns_linux.go
@@ -0,0 +1,206 @@
+package netns
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+const (
+ // These constants belong in the syscall library but have not been
+ // added yet.
+ CLONE_NEWUTS = 0x04000000 /* New utsname group? */
+ CLONE_NEWIPC = 0x08000000 /* New ipcs */
+ CLONE_NEWUSER = 0x10000000 /* New user namespace */
+ CLONE_NEWPID = 0x20000000 /* New pid namespace */
+ CLONE_NEWNET = 0x40000000 /* New network namespace */
+ CLONE_IO = 0x80000000 /* Get io context */
+)
+
+// Setns sets namespace using syscall. Note that this should be a method
+// in syscall but it has not been added.
+func Setns(ns NsHandle, nstype int) (err error) {
+ _, _, e1 := syscall.Syscall(SYS_SETNS, uintptr(ns), uintptr(nstype), 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Set sets the current network namespace to the namespace represented
+// by NsHandle.
+func Set(ns NsHandle) (err error) {
+ return Setns(ns, CLONE_NEWNET)
+}
+
+// New creates a new network namespace and returns a handle to it.
+func New() (ns NsHandle, err error) {
+ if err := syscall.Unshare(CLONE_NEWNET); err != nil {
+ return -1, err
+ }
+ return Get()
+}
+
+// Get gets a handle to the current threads network namespace.
+func Get() (NsHandle, error) {
+ return GetFromThread(os.Getpid(), syscall.Gettid())
+}
+
+// GetFromName gets a handle to a named network namespace such as one
+// created by `ip netns add`.
+func GetFromName(name string) (NsHandle, error) {
+ fd, err := syscall.Open(fmt.Sprintf("/var/run/netns/%s", name), syscall.O_RDONLY, 0)
+ if err != nil {
+ return -1, err
+ }
+ return NsHandle(fd), nil
+}
+
+// GetFromPid gets a handle to the network namespace of a given pid.
+func GetFromPid(pid int) (NsHandle, error) {
+ fd, err := syscall.Open(fmt.Sprintf("/proc/%d/ns/net", pid), syscall.O_RDONLY, 0)
+ if err != nil {
+ return -1, err
+ }
+ return NsHandle(fd), nil
+}
+
+// GetFromThread gets a handle to the network namespace of a given pid and tid.
+func GetFromThread(pid, tid int) (NsHandle, error) {
+ name := fmt.Sprintf("/proc/%d/task/%d/ns/net", pid, tid)
+ fd, err := syscall.Open(name, syscall.O_RDONLY, 0)
+ if err != nil {
+ return -1, err
+ }
+ return NsHandle(fd), nil
+}
+
+// GetFromDocker gets a handle to the network namespace of a docker container.
+// Id is prefixed matched against the running docker containers, so a short
+// identifier can be used as long as it isn't ambiguous.
+func GetFromDocker(id string) (NsHandle, error) {
+ pid, err := getPidForContainer(id)
+ if err != nil {
+ return -1, err
+ }
+ return GetFromPid(pid)
+}
+
+// borrowed from docker/utils/utils.go
+func findCgroupMountpoint(cgroupType string) (string, error) {
+ output, err := ioutil.ReadFile("/proc/mounts")
+ if err != nil {
+ return "", err
+ }
+
+ // /proc/mounts has 6 fields per line, one mount per line, e.g.
+ // cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
+ for _, line := range strings.Split(string(output), "\n") {
+ parts := strings.Split(line, " ")
+ if len(parts) == 6 && parts[2] == "cgroup" {
+ for _, opt := range strings.Split(parts[3], ",") {
+ if opt == cgroupType {
+ return parts[1], nil
+ }
+ }
+ }
+ }
+
+ return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
+}
+
+// Returns the relative path to the cgroup docker is running in.
+// borrowed from docker/utils/utils.go
+// modified to get the docker pid instead of using /proc/self
+func getThisCgroup(cgroupType string) (string, error) {
+ dockerpid, err := ioutil.ReadFile("/var/run/docker.pid")
+ if err != nil {
+ return "", err
+ }
+ result := strings.Split(string(dockerpid), "\n")
+ if len(result) == 0 || len(result[0]) == 0 {
+ return "", fmt.Errorf("docker pid not found in /var/run/docker.pid")
+ }
+ pid, err := strconv.Atoi(result[0])
+
+ output, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid))
+ if err != nil {
+ return "", err
+ }
+ for _, line := range strings.Split(string(output), "\n") {
+ parts := strings.Split(line, ":")
+ // any type used by docker should work
+ if parts[1] == cgroupType {
+ return parts[2], nil
+ }
+ }
+ return "", fmt.Errorf("cgroup '%s' not found in /proc/%d/cgroup", cgroupType, pid)
+}
+
+// Returns the first pid in a container.
+// borrowed from docker/utils/utils.go
+// modified to only return the first pid
+// modified to glob with id
+// modified to search for newer docker containers
+func getPidForContainer(id string) (int, error) {
+ pid := 0
+
+ // memory is chosen randomly, any cgroup used by docker works
+ cgroupType := "memory"
+
+ cgroupRoot, err := findCgroupMountpoint(cgroupType)
+ if err != nil {
+ return pid, err
+ }
+
+ cgroupThis, err := getThisCgroup(cgroupType)
+ if err != nil {
+ return pid, err
+ }
+
+ id += "*"
+
+ attempts := []string{
+ filepath.Join(cgroupRoot, cgroupThis, id, "tasks"),
+ // With more recent lxc versions use, cgroup will be in lxc/
+ filepath.Join(cgroupRoot, cgroupThis, "lxc", id, "tasks"),
+	// With more recent docker, cgroup will be in docker/
+ filepath.Join(cgroupRoot, cgroupThis, "docker", id, "tasks"),
+ }
+
+ var filename string
+ for _, attempt := range attempts {
+ filenames, _ := filepath.Glob(attempt)
+ if len(filenames) > 1 {
+ return pid, fmt.Errorf("Ambiguous id supplied: %v", filenames)
+ } else if len(filenames) == 1 {
+ filename = filenames[0]
+ break
+ }
+ }
+
+ if filename == "" {
+ return pid, fmt.Errorf("Unable to find container: %v", id[:len(id)-1])
+ }
+
+ output, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return pid, err
+ }
+
+ result := strings.Split(string(output), "\n")
+ if len(result) == 0 || len(result[0]) == 0 {
+ return pid, fmt.Errorf("No pid found for container")
+ }
+
+ pid, err = strconv.Atoi(result[0])
+ if err != nil {
+ return pid, fmt.Errorf("Invalid pid '%s': %s", result[0], err)
+ }
+
+ return pid, nil
+}
diff --git a/vendor/src/github.com/vishvananda/netns/netns_linux_386.go b/vendor/src/github.com/vishvananda/netns/netns_linux_386.go
new file mode 100644
index 0000000000..0a6fe49a0b
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/netns_linux_386.go
@@ -0,0 +1,5 @@
+package netns
+
+const (
+ SYS_SETNS = 346
+)
diff --git a/vendor/src/github.com/vishvananda/netns/netns_linux_amd64.go b/vendor/src/github.com/vishvananda/netns/netns_linux_amd64.go
new file mode 100644
index 0000000000..bbf3f4de49
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/netns_linux_amd64.go
@@ -0,0 +1,5 @@
+package netns
+
+const (
+ SYS_SETNS = 308
+)
diff --git a/vendor/src/github.com/vishvananda/netns/netns_linux_arm.go b/vendor/src/github.com/vishvananda/netns/netns_linux_arm.go
new file mode 100644
index 0000000000..e35cb07647
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/netns_linux_arm.go
@@ -0,0 +1,5 @@
+package netns
+
+const (
+ SYS_SETNS = 374
+)
diff --git a/vendor/src/github.com/vishvananda/netns/netns_test.go b/vendor/src/github.com/vishvananda/netns/netns_test.go
new file mode 100644
index 0000000000..e51981cc1b
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/netns_test.go
@@ -0,0 +1,66 @@
+package netns
+
+import (
+ "runtime"
+ "sync"
+ "testing"
+)
+
+func TestGetNewSetDelete(t *testing.T) {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ origns, err := Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ newns, err := New()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if origns.Equal(newns) {
+ t.Fatal("New ns failed")
+ }
+ if err := Set(origns); err != nil {
+ t.Fatal(err)
+ }
+ newns.Close()
+ if newns.IsOpen() {
+ t.Fatal("newns still open after close", newns)
+ }
+ ns, err := Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !ns.Equal(origns) {
+ t.Fatal("Reset ns failed", origns, newns, ns)
+ }
+}
+
+func TestNone(t *testing.T) {
+ ns := None()
+ if ns.IsOpen() {
+ t.Fatal("None ns is open", ns)
+ }
+}
+
+func TestThreaded(t *testing.T) {
+ ncpu := runtime.GOMAXPROCS(-1)
+ if ncpu < 2 {
+ t.Skip("-cpu=2 or larger required")
+ }
+
+ // Lock this thread simply to ensure other threads get used.
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ wg := &sync.WaitGroup{}
+ for i := 0; i < ncpu; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ TestGetNewSetDelete(t)
+ }()
+ }
+ wg.Wait()
+}
diff --git a/vendor/src/github.com/vishvananda/netns/netns_unspecified.go b/vendor/src/github.com/vishvananda/netns/netns_unspecified.go
new file mode 100644
index 0000000000..42a804fe88
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/netns_unspecified.go
@@ -0,0 +1,35 @@
+// +build !linux
+
+package netns
+
+import (
+ "errors"
+)
+
+var (
+ ErrNotImplemented = errors.New("not implemented")
+)
+
+func Set(ns NsHandle) (err error) {
+	return ErrNotImplemented
+}
+
+func New() (ns NsHandle, err error) {
+	return -1, ErrNotImplemented
+}
+
+func Get() (NsHandle, error) {
+	return -1, ErrNotImplemented
+}
+
+func GetFromName(name string) (NsHandle, error) {
+	return -1, ErrNotImplemented
+}
+
+func GetFromPid(pid int) (NsHandle, error) {
+	return -1, ErrNotImplemented
+}
+
+func GetFromDocker(id string) (NsHandle, error) {
+	return -1, ErrNotImplemented
+}
diff --git a/volume/drivers/adapter.go b/volume/drivers/adapter.go
new file mode 100644
index 0000000000..6846a3a8e8
--- /dev/null
+++ b/volume/drivers/adapter.go
@@ -0,0 +1,60 @@
+package volumedrivers
+
+import "github.com/docker/docker/volume"
+
+type volumeDriverAdapter struct {
+ name string
+ proxy *volumeDriverProxy
+}
+
+func (a *volumeDriverAdapter) Name() string {
+ return a.name
+}
+
+func (a *volumeDriverAdapter) Create(name string) (volume.Volume, error) {
+ err := a.proxy.Create(name)
+ if err != nil {
+ return nil, err
+ }
+ return &volumeAdapter{
+ proxy: a.proxy,
+ name: name,
+ driverName: a.name}, nil
+}
+
+func (a *volumeDriverAdapter) Remove(v volume.Volume) error {
+ return a.proxy.Remove(v.Name())
+}
+
+type volumeAdapter struct {
+ proxy *volumeDriverProxy
+ name string
+ driverName string
+ eMount string // ephemeral host volume path
+}
+
+func (a *volumeAdapter) Name() string {
+ return a.name
+}
+
+func (a *volumeAdapter) DriverName() string {
+ return a.driverName
+}
+
+func (a *volumeAdapter) Path() string {
+ if len(a.eMount) > 0 {
+ return a.eMount
+ }
+ m, _ := a.proxy.Path(a.name)
+ return m
+}
+
+func (a *volumeAdapter) Mount() (string, error) {
+ var err error
+ a.eMount, err = a.proxy.Mount(a.name)
+ return a.eMount, err
+}
+
+func (a *volumeAdapter) Unmount() error {
+ return a.proxy.Unmount(a.name)
+}
diff --git a/volume/drivers/api.go b/volume/drivers/api.go
new file mode 100644
index 0000000000..1b98fa7fc5
--- /dev/null
+++ b/volume/drivers/api.go
@@ -0,0 +1,20 @@
+package volumedrivers
+
+import "github.com/docker/docker/volume"
+
+type client interface {
+ Call(string, interface{}, interface{}) error
+}
+
+func NewVolumeDriver(name string, c client) volume.Driver {
+ proxy := &volumeDriverProxy{c}
+ return &volumeDriverAdapter{name, proxy}
+}
+
+type VolumeDriver interface {
+ Create(name string) (err error)
+ Remove(name string) (err error)
+ Path(name string) (mountpoint string, err error)
+ Mount(name string) (mountpoint string, err error)
+ Unmount(name string) (err error)
+}
diff --git a/volume/drivers/extpoint.go b/volume/drivers/extpoint.go
new file mode 100644
index 0000000000..b002a0ffda
--- /dev/null
+++ b/volume/drivers/extpoint.go
@@ -0,0 +1,61 @@
+package volumedrivers
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/docker/docker/pkg/plugins"
+ "github.com/docker/docker/volume"
+)
+
+// currently created by hand. generation tool would generate this like:
+// $ extpoint-gen Driver > volume/extpoint.go
+
+var drivers = &driverExtpoint{extensions: make(map[string]volume.Driver)}
+
+type driverExtpoint struct {
+ extensions map[string]volume.Driver
+ sync.Mutex
+}
+
+func Register(extension volume.Driver, name string) bool {
+ drivers.Lock()
+ defer drivers.Unlock()
+ if name == "" {
+ return false
+ }
+ _, exists := drivers.extensions[name]
+ if exists {
+ return false
+ }
+ drivers.extensions[name] = extension
+ return true
+}
+
+func Unregister(name string) bool {
+ drivers.Lock()
+ defer drivers.Unlock()
+ _, exists := drivers.extensions[name]
+ if !exists {
+ return false
+ }
+ delete(drivers.extensions, name)
+ return true
+}
+
+func Lookup(name string) (volume.Driver, error) {
+ drivers.Lock()
+ defer drivers.Unlock()
+ ext, ok := drivers.extensions[name]
+ if ok {
+ return ext, nil
+ }
+ pl, err := plugins.Get(name, "VolumeDriver")
+ if err != nil {
+ return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err)
+ }
+
+ d := NewVolumeDriver(name, pl.Client)
+ drivers.extensions[name] = d
+ return d, nil
+}
diff --git a/volume/drivers/proxy.go b/volume/drivers/proxy.go
new file mode 100644
index 0000000000..545e49007a
--- /dev/null
+++ b/volume/drivers/proxy.go
@@ -0,0 +1,74 @@
+package volumedrivers
+
+import "fmt"
+
+// currently created by hand. generation tool would generate this like:
+// $ rpc-gen volume/drivers/api.go VolumeDriver > volume/drivers/proxy.go
+
+type volumeDriverRequest struct {
+ Name string
+}
+
+type volumeDriverResponse struct {
+	Mountpoint string `json:",omitempty"`
+	Err        error  `json:",omitempty"`
+}
+
+type volumeDriverProxy struct {
+ c client
+}
+
+func (pp *volumeDriverProxy) Create(name string) error {
+ args := volumeDriverRequest{name}
+ var ret volumeDriverResponse
+ err := pp.c.Call("VolumeDriver.Create", args, &ret)
+ if err != nil {
+ return pp.fmtError(name, err)
+ }
+ return pp.fmtError(name, ret.Err)
+}
+
+func (pp *volumeDriverProxy) Remove(name string) error {
+ args := volumeDriverRequest{name}
+ var ret volumeDriverResponse
+ err := pp.c.Call("VolumeDriver.Remove", args, &ret)
+ if err != nil {
+ return pp.fmtError(name, err)
+ }
+ return pp.fmtError(name, ret.Err)
+}
+
+func (pp *volumeDriverProxy) Path(name string) (string, error) {
+ args := volumeDriverRequest{name}
+ var ret volumeDriverResponse
+ if err := pp.c.Call("VolumeDriver.Path", args, &ret); err != nil {
+ return "", pp.fmtError(name, err)
+ }
+ return ret.Mountpoint, pp.fmtError(name, ret.Err)
+}
+
+func (pp *volumeDriverProxy) Mount(name string) (string, error) {
+ args := volumeDriverRequest{name}
+ var ret volumeDriverResponse
+ if err := pp.c.Call("VolumeDriver.Mount", args, &ret); err != nil {
+ return "", pp.fmtError(name, err)
+ }
+ return ret.Mountpoint, pp.fmtError(name, ret.Err)
+}
+
+func (pp *volumeDriverProxy) Unmount(name string) error {
+ args := volumeDriverRequest{name}
+ var ret volumeDriverResponse
+ err := pp.c.Call("VolumeDriver.Unmount", args, &ret)
+ if err != nil {
+ return pp.fmtError(name, err)
+ }
+ return pp.fmtError(name, ret.Err)
+}
+
+func (pp *volumeDriverProxy) fmtError(name string, err error) error {
+ if err == nil {
+ return nil
+ }
+ return fmt.Errorf("External volume driver request failed for %s: %v", name, err)
+}
diff --git a/volume/local/local.go b/volume/local/local.go
new file mode 100644
index 0000000000..3082e72bd0
--- /dev/null
+++ b/volume/local/local.go
@@ -0,0 +1,126 @@
+package local
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/docker/docker/volume"
+)
+
+func New(rootDirectory string) (*Root, error) {
+ if err := os.MkdirAll(rootDirectory, 0700); err != nil {
+ return nil, err
+ }
+ r := &Root{
+ path: rootDirectory,
+ volumes: make(map[string]*Volume),
+ }
+ dirs, err := ioutil.ReadDir(rootDirectory)
+ if err != nil {
+ return nil, err
+ }
+ for _, d := range dirs {
+ name := filepath.Base(d.Name())
+ r.volumes[name] = &Volume{
+ driverName: r.Name(),
+ name: name,
+ path: filepath.Join(rootDirectory, name),
+ }
+ }
+ return r, nil
+}
+
+type Root struct {
+ m sync.Mutex
+ path string
+ volumes map[string]*Volume
+}
+
+func (r *Root) Name() string {
+ return "local"
+}
+
+func (r *Root) Create(name string) (volume.Volume, error) {
+ r.m.Lock()
+ defer r.m.Unlock()
+ v, exists := r.volumes[name]
+ if !exists {
+ path := filepath.Join(r.path, name)
+ if err := os.Mkdir(path, 0755); err != nil {
+ if os.IsExist(err) {
+ return nil, fmt.Errorf("volume already exists under %s", path)
+ }
+ return nil, err
+ }
+ v = &Volume{
+ driverName: r.Name(),
+ name: name,
+ path: path,
+ }
+ r.volumes[name] = v
+ }
+ v.use()
+ return v, nil
+}
+
+func (r *Root) Remove(v volume.Volume) error {
+ r.m.Lock()
+ defer r.m.Unlock()
+ lv, ok := v.(*Volume)
+ if !ok {
+ return errors.New("unknown volume type")
+ }
+ lv.release()
+ if lv.usedCount == 0 {
+ delete(r.volumes, lv.name)
+ return os.RemoveAll(lv.path)
+ }
+ return nil
+}
+
+type Volume struct {
+ m sync.Mutex
+ usedCount int
+ // unique name of the volume
+ name string
+ // path is the path on the host where the data lives
+ path string
+ // driverName is the name of the driver that created the volume.
+ driverName string
+}
+
+func (v *Volume) Name() string {
+ return v.name
+}
+
+func (v *Volume) DriverName() string {
+ return v.driverName
+}
+
+func (v *Volume) Path() string {
+ return v.path
+}
+
+func (v *Volume) Mount() (string, error) {
+ return v.path, nil
+}
+
+func (v *Volume) Unmount() error {
+ return nil
+}
+
+func (v *Volume) use() {
+ v.m.Lock()
+ v.usedCount++
+ v.m.Unlock()
+}
+
+func (v *Volume) release() {
+ v.m.Lock()
+ v.usedCount--
+ v.m.Unlock()
+}
diff --git a/volume/volume.go b/volume/volume.go
new file mode 100644
index 0000000000..6edcae3c21
--- /dev/null
+++ b/volume/volume.go
@@ -0,0 +1,26 @@
+package volume
+
+const DefaultDriverName = "local"
+
+type Driver interface {
+ // Name returns the name of the volume driver.
+ Name() string
+ // Create makes a new volume with the given id.
+ Create(string) (Volume, error)
+ // Remove deletes the volume.
+ Remove(Volume) error
+}
+
+type Volume interface {
+ // Name returns the name of the volume
+ Name() string
+ // DriverName returns the name of the driver which owns this volume.
+ DriverName() string
+ // Path returns the absolute path to the volume.
+ Path() string
+ // Mount mounts the volume and returns the absolute path to
+ // where it can be consumed.
+ Mount() (string, error)
+ // Unmount unmounts the volume when it is no longer in use.
+ Unmount() error
+}
diff --git a/volumes/repository.go b/volumes/repository.go
deleted file mode 100644
index 71d6c0ad60..0000000000
--- a/volumes/repository.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package volumes
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "sync"
-
- "github.com/Sirupsen/logrus"
- "github.com/docker/docker/daemon/graphdriver"
- "github.com/docker/docker/pkg/stringid"
-)
-
-type Repository struct {
- configPath string
- driver graphdriver.Driver
- volumes map[string]*Volume
- lock sync.Mutex
-}
-
-func NewRepository(configPath string, driver graphdriver.Driver) (*Repository, error) {
- abspath, err := filepath.Abs(configPath)
- if err != nil {
- return nil, err
- }
-
- // Create the config path
- if err := os.MkdirAll(abspath, 0700); err != nil && !os.IsExist(err) {
- return nil, err
- }
-
- repo := &Repository{
- driver: driver,
- configPath: abspath,
- volumes: make(map[string]*Volume),
- }
-
- return repo, repo.restore()
-}
-
-func (r *Repository) newVolume(path string, writable bool) (*Volume, error) {
- var (
- isBindMount bool
- err error
- id = stringid.GenerateRandomID()
- )
- if path != "" {
- isBindMount = true
- }
-
- if path == "" {
- path, err = r.createNewVolumePath(id)
- if err != nil {
- return nil, err
- }
- }
- path = filepath.Clean(path)
-
- // Ignore the error here since the path may not exist
- // Really just want to make sure the path we are using is real(or nonexistent)
- if cleanPath, err := filepath.EvalSymlinks(path); err == nil {
- path = cleanPath
- }
-
- v := &Volume{
- ID: id,
- Path: path,
- repository: r,
- Writable: writable,
- containers: make(map[string]struct{}),
- configPath: r.configPath + "/" + id,
- IsBindMount: isBindMount,
- }
-
- if err := v.initialize(); err != nil {
- return nil, err
- }
-
- r.add(v)
- return v, nil
-}
-
-func (r *Repository) restore() error {
- dir, err := ioutil.ReadDir(r.configPath)
- if err != nil {
- return err
- }
-
- for _, v := range dir {
- id := v.Name()
- vol := &Volume{
- ID: id,
- configPath: r.configPath + "/" + id,
- containers: make(map[string]struct{}),
- }
- if err := vol.FromDisk(); err != nil {
- if !os.IsNotExist(err) {
- logrus.Debugf("Error restoring volume: %v", err)
- continue
- }
- if err := vol.initialize(); err != nil {
- logrus.Debugf("%s", err)
- continue
- }
- }
- r.add(vol)
- }
- return nil
-}
-
-func (r *Repository) Get(path string) *Volume {
- r.lock.Lock()
- vol := r.get(path)
- r.lock.Unlock()
- return vol
-}
-
-func (r *Repository) get(path string) *Volume {
- path, err := filepath.EvalSymlinks(path)
- if err != nil {
- return nil
- }
- return r.volumes[filepath.Clean(path)]
-}
-
-func (r *Repository) add(volume *Volume) {
- if vol := r.get(volume.Path); vol != nil {
- return
- }
- r.volumes[volume.Path] = volume
-}
-
-func (r *Repository) Delete(path string) error {
- r.lock.Lock()
- defer r.lock.Unlock()
- path, err := filepath.EvalSymlinks(path)
- if err != nil {
- return err
- }
- volume := r.get(filepath.Clean(path))
- if volume == nil {
- return fmt.Errorf("Volume %s does not exist", path)
- }
-
- containers := volume.Containers()
- if len(containers) > 0 {
- return fmt.Errorf("Volume %s is being used and cannot be removed: used by containers %s", volume.Path, containers)
- }
-
- if err := os.RemoveAll(volume.configPath); err != nil {
- return err
- }
-
- if !volume.IsBindMount {
- if err := r.driver.Remove(volume.ID); err != nil {
- if !os.IsNotExist(err) {
- return err
- }
- }
- }
-
- delete(r.volumes, volume.Path)
- return nil
-}
-
-func (r *Repository) createNewVolumePath(id string) (string, error) {
- if err := r.driver.Create(id, ""); err != nil {
- return "", err
- }
-
- path, err := r.driver.Get(id, "")
- if err != nil {
- return "", fmt.Errorf("Driver %s failed to get volume rootfs %s: %v", r.driver, id, err)
- }
-
- return path, nil
-}
-
-func (r *Repository) FindOrCreateVolume(path string, writable bool) (*Volume, error) {
- r.lock.Lock()
- defer r.lock.Unlock()
-
- if path == "" {
- return r.newVolume(path, writable)
- }
-
- if v := r.get(path); v != nil {
- return v, nil
- }
-
- return r.newVolume(path, writable)
-}
diff --git a/volumes/repository_test.go b/volumes/repository_test.go
deleted file mode 100644
index 801c225f75..0000000000
--- a/volumes/repository_test.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package volumes
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "testing"
-
- "github.com/docker/docker/daemon/graphdriver"
- _ "github.com/docker/docker/daemon/graphdriver/vfs"
-)
-
-func TestRepositoryFindOrCreate(t *testing.T) {
- root, err := ioutil.TempDir(os.TempDir(), "volumes")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(root)
- repo, err := newRepo(root)
- if err != nil {
- t.Fatal(err)
- }
-
- // no path
- v, err := repo.FindOrCreateVolume("", true)
- if err != nil {
- t.Fatal(err)
- }
-
- // FIXME: volumes are heavily dependent on the vfs driver, but this should not be so!
- expected := filepath.Join(root, "repo-graph", "vfs", "dir", v.ID)
- if v.Path != expected {
- t.Fatalf("expected new path to be created in %s, got %s", expected, v.Path)
- }
-
- // with a non-existant path
- dir := filepath.Join(root, "doesntexist")
- v, err = repo.FindOrCreateVolume(dir, true)
- if err != nil {
- t.Fatal(err)
- }
-
- if v.Path != dir {
- t.Fatalf("expected new path to be created in %s, got %s", dir, v.Path)
- }
-
- if _, err := os.Stat(v.Path); err != nil {
- t.Fatal(err)
- }
-
- // with a pre-existing path
- // can just use the same path from above since it now exists
- v, err = repo.FindOrCreateVolume(dir, true)
- if err != nil {
- t.Fatal(err)
- }
- if v.Path != dir {
- t.Fatalf("expected new path to be created in %s, got %s", dir, v.Path)
- }
-
-}
-
-func TestRepositoryGet(t *testing.T) {
- root, err := ioutil.TempDir(os.TempDir(), "volumes")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(root)
- repo, err := newRepo(root)
- if err != nil {
- t.Fatal(err)
- }
-
- v, err := repo.FindOrCreateVolume("", true)
- if err != nil {
- t.Fatal(err)
- }
-
- v2 := repo.Get(v.Path)
- if v2 == nil {
- t.Fatalf("expected to find volume but didn't")
- }
- if v2 != v {
- t.Fatalf("expected get to return same volume")
- }
-}
-
-func TestRepositoryDelete(t *testing.T) {
- root, err := ioutil.TempDir(os.TempDir(), "volumes")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(root)
- repo, err := newRepo(root)
- if err != nil {
- t.Fatal(err)
- }
-
- // with a normal volume
- v, err := repo.FindOrCreateVolume("", true)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := repo.Delete(v.Path); err != nil {
- t.Fatal(err)
- }
-
- if v := repo.Get(v.Path); v != nil {
- t.Fatalf("expected volume to not exist")
- }
-
- if _, err := os.Stat(v.Path); err == nil {
- t.Fatalf("expected volume files to be removed")
- }
-
- // with a bind mount
- dir := filepath.Join(root, "test")
- v, err = repo.FindOrCreateVolume(dir, true)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := repo.Delete(v.Path); err != nil {
- t.Fatal(err)
- }
-
- if v := repo.Get(v.Path); v != nil {
- t.Fatalf("expected volume to not exist")
- }
-
- if _, err := os.Stat(v.Path); err != nil && os.IsNotExist(err) {
- t.Fatalf("expected bind volume data to persist after destroying volume")
- }
-
- // with container refs
- dir = filepath.Join(root, "test")
- v, err = repo.FindOrCreateVolume(dir, true)
- if err != nil {
- t.Fatal(err)
- }
- v.AddContainer("1234")
-
- if err := repo.Delete(v.Path); err == nil {
- t.Fatalf("expected volume delete to fail due to container refs")
- }
-
- v.RemoveContainer("1234")
- if err := repo.Delete(v.Path); err != nil {
- t.Fatal(err)
- }
-
-}
-
-func newRepo(root string) (*Repository, error) {
- configPath := filepath.Join(root, "repo-config")
- graphDir := filepath.Join(root, "repo-graph")
-
- driver, err := graphdriver.GetDriver("vfs", graphDir, []string{})
- if err != nil {
- return nil, err
- }
- return NewRepository(configPath, driver)
-}
diff --git a/volumes/volume.go b/volumes/volume.go
deleted file mode 100644
index 5b3b646018..0000000000
--- a/volumes/volume.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package volumes
-
-import (
- "encoding/json"
- "os"
- "path/filepath"
- "sync"
-
- "github.com/docker/docker/pkg/symlink"
-)
-
-type Volume struct {
- ID string
- Path string
- IsBindMount bool
- Writable bool
- containers map[string]struct{}
- configPath string
- repository *Repository
- lock sync.Mutex
-}
-
-func (v *Volume) IsDir() (bool, error) {
- stat, err := os.Stat(v.Path)
- if err != nil {
- return false, err
- }
-
- return stat.IsDir(), nil
-}
-
-func (v *Volume) Containers() []string {
- v.lock.Lock()
-
- var containers []string
- for c := range v.containers {
- containers = append(containers, c)
- }
-
- v.lock.Unlock()
- return containers
-}
-
-func (v *Volume) RemoveContainer(containerId string) {
- v.lock.Lock()
- delete(v.containers, containerId)
- v.lock.Unlock()
-}
-
-func (v *Volume) AddContainer(containerId string) {
- v.lock.Lock()
- v.containers[containerId] = struct{}{}
- v.lock.Unlock()
-}
-
-func (v *Volume) initialize() error {
- v.lock.Lock()
- defer v.lock.Unlock()
-
- if _, err := os.Stat(v.Path); err != nil {
- if !os.IsNotExist(err) {
- return err
- }
- if err := os.MkdirAll(v.Path, 0755); err != nil {
- return err
- }
- }
-
- if err := os.MkdirAll(v.configPath, 0755); err != nil {
- return err
- }
-
- return v.toDisk()
-}
-
-func (v *Volume) ToDisk() error {
- v.lock.Lock()
- defer v.lock.Unlock()
- return v.toDisk()
-}
-
-func (v *Volume) toDisk() error {
- jsonPath, err := v.jsonPath()
- if err != nil {
- return err
- }
- f, err := os.OpenFile(jsonPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
- if err != nil {
- return err
- }
- if err := json.NewEncoder(f).Encode(v); err != nil {
- f.Close()
- return err
- }
- return f.Close()
-}
-
-func (v *Volume) FromDisk() error {
- v.lock.Lock()
- defer v.lock.Unlock()
- pth, err := v.jsonPath()
- if err != nil {
- return err
- }
-
- jsonSource, err := os.Open(pth)
- if err != nil {
- return err
- }
- defer jsonSource.Close()
-
- dec := json.NewDecoder(jsonSource)
-
- return dec.Decode(v)
-}
-
-func (v *Volume) jsonPath() (string, error) {
- return v.GetRootResourcePath("config.json")
-}
-
-// Evalutes `path` in the scope of the volume's root path, with proper path
-// sanitisation. Symlinks are all scoped to the root of the volume, as
-// though the volume's root was `/`.
-//
-// The volume's root path is the host-facing path of the root of the volume's
-// mountpoint inside a container.
-//
-// NOTE: The returned path is *only* safely scoped inside the volume's root
-// if no component of the returned path changes (such as a component
-// symlinking to a different path) between using this method and using the
-// path. See symlink.FollowSymlinkInScope for more details.
-func (v *Volume) GetResourcePath(path string) (string, error) {
- cleanPath := filepath.Join("/", path)
- return symlink.FollowSymlinkInScope(filepath.Join(v.Path, cleanPath), v.Path)
-}
-
-// Evalutes `path` in the scope of the volume's config path, with proper path
-// sanitisation. Symlinks are all scoped to the root of the config path, as
-// though the config path was `/`.
-//
-// The config path of a volume is not exposed to the container and is just used
-// to store volume configuration options and other internal information. If in
-// doubt, you probably want to just use v.GetResourcePath.
-//
-// NOTE: The returned path is *only* safely scoped inside the volume's config
-// path if no component of the returned path changes (such as a component
-// symlinking to a different path) between using this method and using the
-// path. See symlink.FollowSymlinkInScope for more details.
-func (v *Volume) GetRootResourcePath(path string) (string, error) {
- cleanPath := filepath.Join("/", path)
- return symlink.FollowSymlinkInScope(filepath.Join(v.configPath, cleanPath), v.configPath)
-}
diff --git a/volumes/volume_test.go b/volumes/volume_test.go
deleted file mode 100644
index b30549d379..0000000000
--- a/volumes/volume_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package volumes
-
-import (
- "os"
- "testing"
-
- "github.com/docker/docker/pkg/stringutils"
-)
-
-func TestContainers(t *testing.T) {
- v := &Volume{containers: make(map[string]struct{})}
- id := "1234"
-
- v.AddContainer(id)
-
- if v.Containers()[0] != id {
- t.Fatalf("adding a container ref failed")
- }
-
- v.RemoveContainer(id)
- if len(v.Containers()) != 0 {
- t.Fatalf("removing container failed")
- }
-}
-
-// os.Stat(v.Path) is returning ErrNotExist, initialize catch it and try to
-// mkdir v.Path but it dies and correctly returns the error
-func TestInitializeCannotMkdirOnNonExistentPath(t *testing.T) {
- v := &Volume{Path: "nonexistentpath"}
-
- err := v.initialize()
- if err == nil {
- t.Fatal("Expected not to initialize volume with a non existent path")
- }
-
- if !os.IsNotExist(err) {
- t.Fatalf("Expected to get ErrNotExist error, got %s", err)
- }
-}
-
-// os.Stat(v.Path) is NOT returning ErrNotExist so skip and return error from
-// initialize
-func TestInitializeCannotStatPathFileNameTooLong(t *testing.T) {
- // ENAMETOOLONG
- v := &Volume{Path: stringutils.GenerateRandomAlphaOnlyString(300)}
-
- err := v.initialize()
- if err == nil {
- t.Fatal("Expected not to initialize volume with a non existent path")
- }
-
- if os.IsNotExist(err) {
- t.Fatal("Expected to not get ErrNotExist")
- }
-}