From e51af36a85126aca6bf6da5291eaf960fd82aa56 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Thu, 21 Nov 2013 21:45:48 +0100 Subject: [PATCH 01/33] Add experimenta btrfs driver This is an experimental btrfs driver. To use it you must have /var/lib/docker mounted on a btrfs filesystem and explicitly specify DOCKER_DRIVER=btrfs in the docker daemon environment. It works by using subvolumes for the docker image/container layers. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- graphdriver/btrfs/btrfs.go | 215 +++++++++++++++++++++++++ graphdriver/btrfs/dummy_unsupported.go | 3 + graphdriver/driver.go | 2 + runtime.go | 1 + 4 files changed, 221 insertions(+) create mode 100644 graphdriver/btrfs/btrfs.go create mode 100644 graphdriver/btrfs/dummy_unsupported.go diff --git a/graphdriver/btrfs/btrfs.go b/graphdriver/btrfs/btrfs.go new file mode 100644 index 0000000000..52ed66b174 --- /dev/null +++ b/graphdriver/btrfs/btrfs.go @@ -0,0 +1,215 @@ +// +build linux + +package btrfs + +/* +#include +#include +#include +#include +#include +#include +#include + +*/ +import "C" +import ( + "fmt" + "github.com/dotcloud/docker/graphdriver" + "os" + "path" + "syscall" + "unsafe" +) + +func init() { + graphdriver.Register("btrfs", Init) +} + +func Init(home string) (graphdriver.Driver, error) { + rootdir := path.Dir(home) + + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return nil, err + } + + if buf.Type != 0x9123683E { + return nil, fmt.Errorf("%s is not a btrfs filesystem", rootdir) + } + + return &Driver{ + home: home, + }, nil +} + +type Driver struct { + home string +} + +func (d *Driver) String() string { + return "btrfs" +} + +func (d *Driver) Status() [][2]string { + return nil +} + +func (d *Driver) Cleanup() error { + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := 
C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Can't create subvolume") + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Can't create subvolume") + } + return nil +} + +func subvolDelete(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Can't create subvolume") + } + return nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirId(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +func (d *Driver) Create(id string, 
parent string) error { + subvolumes := path.Join(d.home, "subvolumes") + if err := os.MkdirAll(subvolumes, 0700); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir, err := d.Get(parent) + if err != nil { + return err + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + return nil +} + +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirId(id) + if _, err := os.Stat(dir); err != nil { + return err + } + if err := subvolDelete(d.subvolumesDir(), id); err != nil { + return err + } + return os.RemoveAll(dir) +} + +func (d *Driver) Get(id string) (string, error) { + dir := d.subvolumesDirId(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + return dir, nil +} + +func (d *Driver) Put(id string) { +} + +func (d *Driver) Exists(id string) bool { + dir := d.subvolumesDirId(id) + _, err := os.Stat(dir) + return err == nil +} diff --git a/graphdriver/btrfs/dummy_unsupported.go b/graphdriver/btrfs/dummy_unsupported.go new file mode 100644 index 0000000000..5efd18081f --- /dev/null +++ b/graphdriver/btrfs/dummy_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package btrfs diff --git a/graphdriver/driver.go b/graphdriver/driver.go index 2be3f05f3a..c0ed00b0ad 100644 --- a/graphdriver/driver.go +++ b/graphdriver/driver.go @@ -41,6 +41,8 @@ var ( "aufs", "devicemapper", "vfs", + // experimental, has to be enabled manually for now + "btrfs", } ) diff --git a/runtime.go b/runtime.go index 1b17bac973..c33220b3a6 100644 --- a/runtime.go +++ b/runtime.go @@ -9,6 +9,7 @@ import ( "github.com/dotcloud/docker/execdriver/lxc" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/graphdriver/aufs" + _ "github.com/dotcloud/docker/graphdriver/btrfs" _ "github.com/dotcloud/docker/graphdriver/devmapper" _ 
"github.com/dotcloud/docker/graphdriver/vfs" "github.com/dotcloud/docker/pkg/graphdb" From d419da7227826e84e9375ece4fd9d4978a42cbf7 Mon Sep 17 00:00:00 2001 From: "Chris St. Pierre" Date: Fri, 17 Jan 2014 08:46:11 -0500 Subject: [PATCH 02/33] Added new mkimage-yum.sh script to create CentOS base images mkimage-rinse.sh requires rinse, which is not readily available on CentOS or Fedora. Plus, creating a base image is trivial with yum alone. Docker-DCO-1.1-Signed-off-by: Chris St. Pierre (github: stpierre) --- contrib/mkimage-rinse.sh | 7 +++ contrib/mkimage-yum.sh | 90 ++++++++++++++++++++++++++++ docs/sources/articles/baseimages.rst | 5 +- 3 files changed, 101 insertions(+), 1 deletion(-) create mode 100755 contrib/mkimage-yum.sh diff --git a/contrib/mkimage-rinse.sh b/contrib/mkimage-rinse.sh index de9265d48c..dfe9999d92 100755 --- a/contrib/mkimage-rinse.sh +++ b/contrib/mkimage-rinse.sh @@ -1,4 +1,11 @@ #!/usr/bin/env bash +# +# Create a base CentOS Docker image. + +# This script is useful on systems with rinse available (e.g., +# building a CentOS image on Debian). See contrib/mkimage-yum.sh for +# a way to build CentOS images on systems with yum installed. + set -e repo="$1" diff --git a/contrib/mkimage-yum.sh b/contrib/mkimage-yum.sh new file mode 100755 index 0000000000..594eb96fec --- /dev/null +++ b/contrib/mkimage-yum.sh @@ -0,0 +1,90 @@ +#!/bin/bash +# +# Create a base CentOS Docker image. +# +# This script is useful on systems with yum installed (e.g., building +# a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way +# to build CentOS images on other systems. + +usage() { + cat < +OPTIONS: + -y The path to the yum config to install packages from. The + default is /etc/yum.conf. +EOOPTS + exit 1 +} + +# option defaults +yum_config=/etc/yum.conf +while getopts ":y:h" opt; do + case $opt in + y) + yum_config=$OPTARG + ;; + h) + usage + ;; + \?) 
+ echo "Invalid option: -$OPTARG" + usage + ;; + esac +done +shift $((OPTIND - 1)) +name=$1 + +if [[ -z $name ]]; then + usage +fi + +#-------------------- + +target=$(mktemp -d --tmpdir $(basename $0).XXXXXX) + +set -x + +for dev in console null zero urandom; do + /sbin/MAKEDEV -d "$target"/dev -x $dev +done + +yum -c "$yum_config" --installroot="$target" --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y groupinstall Core +yum -c "$yum_config" --installroot="$mount" -y clean all + +cat > "$target"/etc/sysconfig/network <&2 "warning: cannot autodetect OS version, using '$name' as tag" + version=$name +fi + +tar --numeric-owner -c -C "$target" . | docker import - $name:$version +docker run -i -t $name:$version echo success + +rm -rf "$target" diff --git a/docs/sources/articles/baseimages.rst b/docs/sources/articles/baseimages.rst index 51a51e2f93..0523434e0b 100644 --- a/docs/sources/articles/baseimages.rst +++ b/docs/sources/articles/baseimages.rst @@ -37,7 +37,10 @@ There are more example scripts for creating base images in the Docker Github Repo: * `BusyBox `_ -* `CentOS / Scientific Linux CERN (SLC) +* CentOS / Scientific Linux CERN (SLC) `on Debian/Ubuntu `_ + or + `on CentOS/RHEL/SLC/etc. + `_ * `Debian / Ubuntu `_ From fd2403b8303f7ea9152dd08021d2c9c7070aefb4 Mon Sep 17 00:00:00 2001 From: "Chris St. Pierre" Date: Thu, 23 Jan 2014 12:01:21 -0500 Subject: [PATCH 03/33] fix shebang in mkimage-yum.sh Docker-DCO-1.1-Signed-off-by: Chris St. Pierre (github: stpierre) --- contrib/mkimage-yum.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/mkimage-yum.sh b/contrib/mkimage-yum.sh index 594eb96fec..54e99f1f04 100755 --- a/contrib/mkimage-yum.sh +++ b/contrib/mkimage-yum.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Create a base CentOS Docker image. 
# From b994c1315997945815e7b927b8493595b194d016 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Fri, 24 Jan 2014 23:01:12 -0700 Subject: [PATCH 04/33] Update hack/make.sh with slightly clearer output and a more precise "are we in the Docker container?" check Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/make.sh | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/hack/make.sh b/hack/make.sh index 6029c9ec10..ef13c1a283 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -25,12 +25,18 @@ set -o pipefail # We're a nice, sexy, little shell script, and people might try to run us; # but really, they shouldn't. We want to be in a container! -RESOLVCONF=$(readlink --canonicalize /etc/resolv.conf) -grep -q "$RESOLVCONF" /proc/mounts || { - echo >&2 "# WARNING! I don't seem to be running in a docker container." - echo >&2 "# The result of this command might be an incorrect build, and will not be officially supported." - echo >&2 "# Try this: 'make all'" -} +if [ "$(pwd)" != '/go/src/github.com/dotcloud/docker' ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then + { + echo "# WARNING! I don't seem to be running in the Docker container." + echo "# The result of this command might be an incorrect build, and will not be" + echo "# officially supported." + echo "#" + echo "# Try this instead: make all" + echo "#" + } >&2 +fi + +echo # List of bundles to create when no argument is passed DEFAULT_BUNDLES=( From 07c4eda46a6a1af82b4b519d9186b9bf7881d7cc Mon Sep 17 00:00:00 2001 From: Andy Rothfusz Date: Thu, 23 Jan 2014 18:59:00 -0800 Subject: [PATCH 05/33] Fix 2720 -- Expanded documentation for docker run. 
Docker-DCO-1.1-Signed-off-by: Andy Rothfusz (github: metalivedev) --- docs/sources/articles/index.rst | 1 + docs/sources/articles/runmetrics.rst | 469 +++++++++++++++++++++ docs/sources/reference/builder.rst | 8 +- docs/sources/reference/commandline/cli.rst | 39 ++ docs/sources/reference/index.rst | 1 + docs/sources/reference/run.rst | 353 ++++++++++++++++ 6 files changed, 867 insertions(+), 4 deletions(-) create mode 100644 docs/sources/articles/runmetrics.rst create mode 100644 docs/sources/reference/run.rst diff --git a/docs/sources/articles/index.rst b/docs/sources/articles/index.rst index 2cfc427420..75c0cd3fa9 100644 --- a/docs/sources/articles/index.rst +++ b/docs/sources/articles/index.rst @@ -12,3 +12,4 @@ Articles security baseimages + runmetrics diff --git a/docs/sources/articles/runmetrics.rst b/docs/sources/articles/runmetrics.rst new file mode 100644 index 0000000000..f7406bc5ed --- /dev/null +++ b/docs/sources/articles/runmetrics.rst @@ -0,0 +1,469 @@ +:title: Runtime Metrics +:description: Measure the behavior of running containers +:keywords: docker, metrics, CPU, memory, disk, IO, run, runtime + +.. _run_metrics: + + +Runtime Metrics +=============== + +Linux Containers rely on `control groups +`_ which +not only track groups of processes, but also expose metrics about CPU, +memory, and block I/O usage. You can access those metrics and obtain +network usage metrics as well. This is relevant for "pure" LXC +containers, as well as for Docker containers. + +Control Groups +-------------- + +Control groups are exposed through a pseudo-filesystem. In recent +distros, you should find this filesystem under +``/sys/fs/cgroup``. Under that directory, you will see multiple +sub-directories, called devices, freezer, blkio, etc.; each +sub-directory actually corresponds to a different cgroup hierarchy. + +On older systems, the control groups might be mounted on ``/cgroup``, +without distinct hierarchies. 
In that case, instead of seeing the +sub-directories, you will see a bunch of files in that directory, and +possibly some directories corresponding to existing containers. + +To figure out where your control groups are mounted, you can run: + +:: + + grep cgroup /proc/mounts + +.. _run_findpid: + +Ennumerating Cgroups +-------------------- + +You can look into ``/proc/cgroups`` to see the different control group +subsystems known to the system, the hierarchy they belong to, and how +many groups they contain. + +You can also look at ``/proc//cgroup`` to see which control +groups a process belongs to. The control group will be shown as a path +relative to the root of the hierarchy mountpoint; e.g. ``/`` means +“this process has not been assigned into a particular group”, while +``/lxc/pumpkin`` means that the process is likely to be a member of a +container named ``pumpkin``. + +Finding the Cgroup for a Given Container +---------------------------------------- + +For each container, one cgroup will be created in each hierarchy. On +older systems with older versions of the LXC userland tools, the name +of the cgroup will be the name of the container. With more recent +versions of the LXC tools, the cgroup will be ``lxc/.`` + +For Docker containers using cgroups, the container name will be the +full ID or long ID of the container. If a container shows up as +ae836c95b4c3 in ``docker ps``, its long ID might be something like +``ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79``. You +can look it up with ``docker inspect`` or ``docker ps -notrunc``. + +Putting everything together to look at the memory metrics for a Docker +container, take a look at ``/sys/fs/cgroup/memory/lxc//``. + +Metrics from Cgroups: Memory, CPU, Block IO +------------------------------------------- + +For each subsystem (memory, cpu, and block i/o), you will find one or +more pseudo-files containing statistics. + +Memory Metrics: ``memory.stat`` +............................... 
+ +Memory metrics are found in the "memory" cgroup. Note that the memory +control group adds a little overhead, because it does very +fine-grained accounting of the memory usage on your system. Therefore, +many distros chose to not enable it by default. Generally, to enable +it, all you have to do is to add some kernel command-line parameters: +``cgroup_enable=memory swapaccount=1``. + +The metrics are in the pseudo-file ``memory.stat``. Here is what it +will look like: + +:: + + cache 11492564992 + rss 1930993664 + mapped_file 306728960 + pgpgin 406632648 + pgpgout 403355412 + swap 0 + pgfault 728281223 + pgmajfault 1724 + inactive_anon 46608384 + active_anon 1884520448 + inactive_file 7003344896 + active_file 4489052160 + unevictable 32768 + hierarchical_memory_limit 9223372036854775807 + hierarchical_memsw_limit 9223372036854775807 + total_cache 11492564992 + total_rss 1930993664 + total_mapped_file 306728960 + total_pgpgin 406632648 + total_pgpgout 403355412 + total_swap 0 + total_pgfault 728281223 + total_pgmajfault 1724 + total_inactive_anon 46608384 + total_active_anon 1884520448 + total_inactive_file 7003344896 + total_active_file 4489052160 + total_unevictable 32768 + +The first half (without the ``total_`` prefix) contains statistics +relevant to the processes within the cgroup, excluding +sub-cgroups. The second half (with the ``total_`` prefix) includes +sub-cgroups as well. + +Some metrics are "gauges", i.e. values that can increase or decrease +(e.g. swap, the amount of swap space used by the members of the +cgroup). Some others are "counters", i.e. values that can only go up, +because they represent occurrences of a specific event (e.g. pgfault, +which indicates the number of page faults which happened since the +creation of the cgroup; this number can never decrease). + +cache + the amount of memory used by the processes of this control group + that can be associated precisely with a block on a block + device. 
When you read and write files from and to disk, this amount + will increase. This will be the case if you use "conventional" I/O + (``open``, ``read``, ``write`` syscalls) as well as mapped files + (with ``mmap``). It also accounts for the memory used by ``tmpfs`` + mounts, though the reasons are unclear. + +rss + the amount of memory that *doesn't* correspond to anything on + disk: stacks, heaps, and anonymous memory maps. + +mapped_file + indicates the amount of memory mapped by the processes in the + control group. It doesn't give you information about *how much* + memory is used; it rather tells you *how* it is used. + +pgpgin and pgpgout + correspond to *charging events*. Each time a page is "charged" + (=added to the accounting) to a cgroup, pgpgin increases. When a + page is "uncharged" (=no longer "billed" to a cgroup), pgpgout + increases. + +pgfault and pgmajfault + indicate the number of times that a process of the cgroup triggered + a "page fault" and a "major fault", respectively. A page fault + happens when a process accesses a part of its virtual memory space + which is inexistent or protected. The former can happen if the + process is buggy and tries to access an invalid address (it will + then be sent a ``SIGSEGV`` signal, typically killing it with the + famous ``Segmentation fault`` message). The latter can happen when + the process reads from a memory zone which has been swapped out, or + which corresponds to a mapped file: in that case, the kernel will + load the page from disk, and let the CPU complete the memory + access. It can also happen when the process writes to a + copy-on-write memory zone: likewise, the kernel will preempt the + process, duplicate the memory page, and resume the write operation + on the process' own copy of the page. "Major" faults happen when the + kernel actually has to read the data from disk. When it just has to + duplicate an existing page, or allocate an empty page, it's a + regular (or "minor") fault. 
+ +swap + the amount of swap currently used by the processes in this cgroup. + +active_anon and inactive_anon + the amount of *anonymous* memory that has been identified has + respectively *active* and *inactive* by the kernel. "Anonymous" + memory is the memory that is *not* linked to disk pages. In other + words, that's the equivalent of the rss counter described above. In + fact, the very definition of the rss counter is **active_anon** + + **inactive_anon** - **tmpfs** (where tmpfs is the amount of memory + used up by ``tmpfs`` filesystems mounted by this control + group). Now, what's the difference between "active" and "inactive"? + Pages are initially "active"; and at regular intervals, the kernel + sweeps over the memory, and tags some pages as "inactive". Whenever + they are accessed again, they are immediately retagged + "active". When the kernel is almost out of memory, and time comes to + swap out to disk, the kernel will swap "inactive" pages. + +active_file and inactive_file + cache memory, with *active* and *inactive* similar to the *anon* + memory above. The exact formula is cache = **active_file** + + **inactive_file** + **tmpfs**. The exact rules used by the kernel to + move memory pages between active and inactive sets are different + from the ones used for anonymous memory, but the general principle + is the same. Note that when the kernel needs to reclaim memory, it + is cheaper to reclaim a clean (=non modified) page from this pool, + since it can be reclaimed immediately (while anonymous pages and + dirty/modified pages have to be written to disk first). + +unevictable + the amount of memory that cannot be reclaimed; generally, it will + account for memory that has been "locked" with ``mlock``. It is + often used by crypto frameworks to make sure that secret keys and + other sensitive material never gets swapped out to disk. + +memory and memsw limits + These are not really metrics, but a reminder of the limits applied + to this cgroup. 
The first one indicates the maximum amount of + physical memory that can be used by the processes of this control + group; the second one indicates the maximum amount of RAM+swap. + +Accounting for memory in the page cache is very complex. If two +processes in different control groups both read the same file +(ultimately relying on the same blocks on disk), the corresponding +memory charge will be split between the control groups. It's nice, but +it also means that when a cgroup is terminated, it could increase the +memory usage of another cgroup, because they are not splitting the +cost anymore for those memory pages. + +CPU metrics: ``cpuacct.stat`` +............................. + +Now that we've covered memory metrics, everything else will look very +simple in comparison. CPU metrics will be found in the ``cpuacct`` +controller. + +For each container, you will find a pseudo-file ``cpuacct.stat``, +containing the CPU usage accumulated by the processes of the +container, broken down between ``user`` and ``system`` time. If you're +not familiar with the distinction, ``user`` is the time during which +the processes were in direct control of the CPU (i.e. executing +process code), and ``system`` is the time during which the CPU was +executing system calls on behalf of those processes. + +Those times are expressed in ticks of 1/100th of second. Actually, +they are expressed in "user jiffies". There are ``USER_HZ`` +*"jiffies"* per second, and on x86 systems, ``USER_HZ`` is 100. This +used to map exactly to the number of scheduler "ticks" per second; but +with the advent of higher frequency scheduling, as well as `tickless +kernels `_, the number of kernel +ticks wasn't relevant anymore. It stuck around anyway, mainly for +legacy and compatibility reasons. + +Block I/O metrics +................. + +Block I/O is accounted in the ``blkio`` controller. Different metrics +are scattered across different files. 
While you can find in-depth +details in the `blkio-controller +`_ +file in the kernel documentation, here is a short list of the most +relevant ones: + +blkio.sectors + contain the number of 512-bytes sectors read and written by the + processes member of the cgroup, device by device. Reads and writes + are merged in a single counter. + +blkio.io_service_bytes + indicates the number of bytes read and written by the cgroup. It has + 4 counters per device, because for each device, it differentiates + between synchronous vs. asynchronous I/O, and reads vs. writes. + +blkio.io_serviced + the number of I/O operations performed, regardless of their size. It + also has 4 counters per device. + +blkio.io_queued + indicates the number of I/O operations currently queued for this + cgroup. In other words, if the cgroup isn't doing any I/O, this will + be zero. Note that the opposite is not true. In other words, if + there is no I/O queued, it does not mean that the cgroup is idle + (I/O-wise). It could be doing purely synchronous reads on an + otherwise quiescent device, which is therefore able to handle them + immediately, without queuing. Also, while it is helpful to figure + out which cgroup is putting stress on the I/O subsystem, keep in + mind that is is a relative quantity. Even if a process group does + not perform more I/O, its queue size can increase just because the + device load increases because of other devices. + +Network Metrics +--------------- + +Network metrics are not exposed directly by control groups. There is a +good explanation for that: network interfaces exist within the context +of *network namespaces*. The kernel could probably accumulate metrics +about packets and bytes sent and received by a group of processes, but +those metrics wouldn't be very useful. You want per-interface metrics +(because traffic happening on the local ``lo`` interface doesn't +really count). 
But since processes in a single cgroup can belong to +multiple network namespaces, those metrics would be harder to +interpret: multiple network namespaces means multiple ``lo`` +interfaces, potentially multiple ``eth0`` interfaces, etc.; so this is +why there is no easy way to gather network metrics with control +groups. + +Instead we can gather network metrics from other sources: + +IPtables +........ + +IPtables (or rather, the netfilter framework for which iptables is +just an interface) can do some serious accounting. + +For instance, you can setup a rule to account for the outbound HTTP +traffic on a web server: + +:: + + iptables -I OUTPUT -p tcp --sport 80 + + +There is no ``-j`` or ``-g`` flag, so the rule will just count matched +packets and go to the following rule. + +Later, you can check the values of the counters, with: + +:: + + iptables -nxvL OUTPUT + +Technically, ``-n`` is not required, but it will prevent iptables from +doing DNS reverse lookups, which are probably useless in this +scenario. + +Counters include packets and bytes. If you want to setup metrics for +container traffic like this, you could execute a ``for`` loop to add +two ``iptables`` rules per container IP address (one in each +direction), in the ``FORWARD`` chain. This will only meter traffic +going through the NAT layer; you will also have to add traffic going +through the userland proxy. + +Then, you will need to check those counters on a regular basis. If you +happen to use ``collectd``, there is a nice plugin to automate +iptables counters collection. + +Interface-level counters +........................ + +Since each container has a virtual Ethernet interface, you might want +to check directly the TX and RX counters of this interface. You will +notice that each container is associated to a virtual Ethernet +interface in your host, with a name like ``vethKk8Zqi``. Figuring out +which interface corresponds to which container is, unfortunately, +difficult. 
+ +But for now, the best way is to check the metrics *from within the +containers*. To accomplish this, you can run an executable from the +host environment within the network namespace of a container using +**ip-netns magic**. + +The ``ip-netns exec`` command will let you execute any program +(present in the host system) within any network namespace visible to +the current process. This means that your host will be able to enter +the network namespace of your containers, but your containers won't be +able to access the host, nor their sibling containers. Containers will +be able to “see” and affect their sub-containers, though. + +The exact format of the command is:: + + ip netns exec + +For example:: + + ip netns exec mycontainer netstat -i + +``ip netns`` finds the "mycontainer" container by using namespaces +pseudo-files. Each process belongs to one network namespace, one PID +namespace, one ``mnt`` namespace, etc., and those namespaces are +materialized under ``/proc//ns/``. For example, the network +namespace of PID 42 is materialized by the pseudo-file +``/proc/42/ns/net``. + +When you run ``ip netns exec mycontainer ...``, it expects +``/var/run/netns/mycontainer`` to be one of those +pseudo-files. (Symlinks are accepted.) + +In other words, to execute a command within the network namespace of a +container, we need to: + +* find out the PID of any process within the container that we want to + investigate; +* create a symlink from ``/var/run/netns/`` to + ``/proc//ns/net`` +* execute ``ip netns exec ....`` + +Please review :ref:`run_findpid` to learn how to find the cgroup of a +pprocess running in the container of which you want to measure network +usage. From there, you can examine the pseudo-file named ``tasks``, +which containes the PIDs that are in the control group (i.e. in the +container). Pick any one of them. 
+ +Putting everything together, if the "short ID" of a container is held +in the environment variable ``$CID``, then you can do this:: + + TASKS=/sys/fs/cgroup/devices/$CID*/tasks + PID=$(head -n 1 $TASKS) + mkdir -p /var/run/netns + ln -sf /proc/$PID/ns/net /var/run/netns/$CID + ip netns exec $CID netstat -i + + +Tips for high-performance metric collection +------------------------------------------- + +Note that running a new process each time you want to update metrics +is (relatively) expensive. If you want to collect metrics at high +resolutions, and/or over a large number of containers (think 1000 +containers on a single host), you do not want to fork a new process +each time. + +Here is how to collect metrics from a single process. You will have to +write your metric collector in C (or any language that lets you do +low-level system calls). You need to use a special system call, +``setns()``, which lets the current process enter any arbitrary +namespace. It requires, however, an open file descriptor to the +namespace pseudo-file (remember: that’s the pseudo-file in +``/proc//ns/net``). + +However, there is a catch: you must not keep this file descriptor +open. If you do, when the last process of the control group exits, the +namespace will not be destroyed, and its network resources (like the +virtual interface of the container) will stay around for ever (or +until you close that file descriptor). + +The right approach would be to keep track of the first PID of each +container, and re-open the namespace pseudo-file each time. + +Collecting metrics when a container exits +----------------------------------------- + +Sometimes, you do not care about real time metric collection, but when +a container exits, you want to know how much CPU, memory, etc. it has +used. + +Docker makes this difficult because it relies on ``lxc-start``, which +carefully cleans up after itself, but it is still possible. 
It is +usually easier to collect metrics at regular intervals (e.g. every +minute, with the collectd LXC plugin) and rely on that instead. + +But, if you'd still like to gather the stats when a container stops, +here is how: + +For each container, start a collection process, and move it to the +control groups that you want to monitor by writing its PID to the +tasks file of the cgroup. The collection process should periodically +re-read the tasks file to check if it's the last process of the +control group. (If you also want to collect network statistics as +explained in the previous section, you should also move the process to +the appropriate network namespace.) + +When the container exits, ``lxc-start`` will try to delete the control +groups. It will fail, since the control group is still in use; but +that’s fine. You process should now detect that it is the only one +remaining in the group. Now is the right time to collect all the +metrics you need! + +Finally, your process should move itself back to the root control +group, and remove the container control group. To remove a control +group, just ``rmdir`` its directory. It's counter-intuitive to +``rmdir`` a directory as it still contains files; but remember that +this is a pseudo-filesystem, so usual rules don't apply. After the +cleanup is done, the collection process can exit safely. + diff --git a/docs/sources/reference/builder.rst b/docs/sources/reference/builder.rst index 45cb2ab86e..9889660913 100644 --- a/docs/sources/reference/builder.rst +++ b/docs/sources/reference/builder.rst @@ -1,12 +1,12 @@ -:title: Build Images (Dockerfile Reference) +:title: Dockerfile Reference :description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image. :keywords: builder, docker, Dockerfile, automation, image creation .. 
_dockerbuilder: -=================================== -Build Images (Dockerfile Reference) -=================================== +==================== +Dockerfile Reference +==================== **Docker can act as a builder** and read instructions from a text ``Dockerfile`` to automate the steps you would otherwise take manually diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index e71b691bcc..a636df6259 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -18,6 +18,45 @@ To list available commands, either run ``docker`` with no parameters or execute ... +.. _cli_options: + +Types of Options +---------------- + +Boolean +~~~~~~~ + +Boolean options look like ``-d=false``. The value you see is the +default value which gets set if you do **not** use the boolean +flag. If you do call ``run -d``, that sets the opposite boolean value, +so in this case, ``true``, and so ``docker run -d`` **will** run in +"detached" mode, in the background. Other boolean options are similar +-- specifying them will set the value to the opposite of the default +value. + +Multi +~~~~~ + +Options like ``-a=[]`` indicate they can be specified multiple times:: + + docker run -a stdin -a stdout -a stderr -i -t ubuntu /bin/bash + +Sometimes this can use a more complex value string, as for ``-v``:: + + docker run -v /host:/container example/mysql + +Strings and Integers +~~~~~~~~~~~~~~~~~~~~ + +Options like ``-name=""`` expect a string, and they can only be +specified once. Options like ``-c=0`` expect an integer, and they can +only be specified once. + +---- + +Commands +-------- + .. 
_cli_daemon: ``daemon`` diff --git a/docs/sources/reference/index.rst b/docs/sources/reference/index.rst index 49099d5621..d35a19b93d 100644 --- a/docs/sources/reference/index.rst +++ b/docs/sources/reference/index.rst @@ -14,4 +14,5 @@ Contents: commandline/index builder + run api/index diff --git a/docs/sources/reference/run.rst b/docs/sources/reference/run.rst new file mode 100644 index 0000000000..7505b7c02f --- /dev/null +++ b/docs/sources/reference/run.rst @@ -0,0 +1,353 @@ +:title: Docker Run Reference +:description: Configure containers at runtime +:keywords: docker, run, configure, runtime + +.. _run_docker: + +==================== +Docker Run Reference +==================== + +**Docker runs processes in isolated containers**. When an operator +executes ``docker run``, she starts a process with its own file +system, its own networking, and its own isolated process tree. The +:ref:`image_def` which starts the process may define defaults related +to the binary to run, the networking to expose, and more, but ``docker +run`` gives final control to the operator who starts the container +from the image. That's the main reason :ref:`cli_run` has more options +than any other ``docker`` command. + +Every one of the :ref:`example_list` shows running containers, and so +here we try to give more in-depth guidance. + +.. contents:: Table of Contents + +.. _run_running: + +General Form +============ + +As you've seen in the :ref:`example_list`, the basic `run` command +takes this form:: + + docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] + +To learn how to interpret the types of ``[OPTIONS]``, see +:ref:`cli_options`. + +The list of ``[OPTIONS]`` breaks down into two groups: + +* options that define the runtime behavior or environment, and +* options that override image defaults. 
+Since image defaults usually get set in :ref:`Dockerfiles
+<dockerbuilder>` (though they could also be set at :ref:`cli_commit`
You +can specify which of the three standard streams (stdin, stdout, +stderr) you'd like to connect between your instead, as in:: + + docker run -a stdin -a stdout -i -t ubuntu /bin/bash + +For interactive processes (like a shell) you will typically want a tty +as well as persistent standard in, so you'll use ``-i -t`` together in +most interactive cases. + +Clean Up (-rm) +-------------- + +By default a container's file system persists even after the container +exits. This makes debugging a lot easier (since you can inspect the +final state) and you retain all your data by default. But if you are +running short-term **foreground** processes, these container file +systems can really pile up. If instead you'd like Docker to +**automatically clean up the container and remove the file system when +the container exits**, you can add the ``-rm`` flag:: + + -rm=false: Automatically remove the container when it exits (incompatible with -d) + +Name (-name) +============ + +The operator can identify a container in three ways: + +* UUID long identifier ("f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778") +* UUID short identifier ("f78375b1c487") +* name ("evil_ptolemy") + +The UUID identifiers come from the Docker daemon, and if you do not +assign a name to the container with ``-name`` then the daemon will +also generate a random string name too. The name can become a handy +way to add meaning to a container since you can use this name when +defining :ref:`links ` (or any other place +you need to identify a container). This works for both background and +foreground Docker containers. + +PID Equivalent +============== + +And finally, to help with automation, you can have Docker write the +container id out to a file of your choosing. 
This is similar to how +some programs might write out their process ID to a file (you've seen +them as .pid files):: + + -cidfile="": Write the container ID to the file + +Overriding Dockerfile Image Defaults +==================================== + +When a developer builds an image from a :ref:`Dockerfile +` or when she commits it, the developer can set a +number of default parameters that take effect when the image starts up +as a container. + +Four of the Dockerfile commands cannot be overridden at runtime: +``FROM, MAINTAINER, RUN``, and ``ADD``. Everything else has a +corresponding override in ``docker run``. We'll go through what the +developer might have set in each Dockerfile instruction and how the +operator can override that setting. + + +CMD +... + +Remember the optional ``COMMAND`` in the Docker commandline:: + + docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] + +This command is optional because the person who created the ``IMAGE`` +may have already provided a default ``COMMAND`` using the Dockerfile +``CMD``. As the operator (the person running a container from the +image), you can override that ``CMD`` just by specifying a new +``COMMAND``. + +If the image also specifies an ``ENTRYPOINT`` then the ``CMD`` or +``COMMAND`` get appended as arguments to the ``ENTRYPOINT``. + + +ENTRYPOINT +.......... + +:: + + -entrypoint="": Overwrite the default entrypoint set by the image + +The ENTRYPOINT of an image is similar to a COMMAND because it +specifies what executable to run when the container starts, but it is +(purposely) more difficult to override. The ENTRYPOINT gives a +container its default nature or behavior, so that when you set an +ENTRYPOINT you can run the container *as if it were that binary*, +complete with default options, and you can pass in more options via +the COMMAND. 
But, sometimes an operator may want to run something else +inside the container, so you can override the default ENTRYPOINT at +runtime by using a string to specify the new ENTRYPOINT. Here is an +example of how to run a shell in a container that has been set up to +automatically run something else (like ``/usr/bin/redis-server``):: + + docker run -i -t -entrypoint /bin/bash example/redis + +or two examples of how to pass more parameters to that ENTRYPOINT:: + + docker run -i -t -entrypoint /bin/bash example/redis -c ls -l + docker run -i -t -entrypoint /usr/bin/redis-cli example/redis --help + + +EXPOSE (``run`` Networking Options) +................................... + +The *Dockerfile* doesn't give much control over networking, only +providing the EXPOSE instruction to give a hint to the operator about +what incoming ports might provide services. At runtime, however, +Docker provides a number of ``run`` options related to networking:: + + -n=true : Enable networking for this container + -dns=[] : Set custom dns servers for the container + -expose=[]: Expose a port from the container + without publishing it to your host + -P=false : Publish all exposed ports to the host interfaces + -p=[] : Publish a container's port to the host (format: + ip:hostPort:containerPort | ip::containerPort | + hostPort:containerPort) + (use 'docker port' to see the actual mapping) + -link="" : Add link to another container (name:alias) + +By default, all containers have networking enabled and they can make +any outgoing connections. The operator can completely disable +networking with ``run -n`` which disables all incoming and outgoing +networking. In cases like this, you would perform IO through files or +stdin/stdout only. + +Your container will use the same DNS servers as the host by default, +but you can override this with ``-dns``. + +As mentioned previously, ``EXPOSE`` (and ``-expose``) make a port +available **in** a container for incoming connections. 
The port number +on the inside of the container (where the service listens) does not +need to be the same number as the port exposed on the outside of the +container (where clients connect), so inside the container you might +have an HTTP service listening on port 80 (and so you ``EXPOSE 80`` in +the Dockerfile), but outside the container the port might be 42800. + +To help a new client container reach the server container's internal +port operator ``-expose'd`` by the operator or ``EXPOSE'd`` by the +developer, the operator has three choices: start the server container +with ``-P`` or ``-p,`` or start the client container with ``-link``. + +If the operator uses ``-P`` or ``-p`` then Docker will make the +exposed port accessible on the host and the ports will be available to +any client that can reach the host. To find the map between the host +ports and the exposed ports, use ``docker port``) + +If the operator uses ``-link`` when starting the new client container, +then the client container can access the exposed port via a private +networking interface. Docker will set some environment variables in +the client container to help indicate which interface and port to use. + +ENV (Environment Variables) +........................... + +The operator can **set any environment variable** in the container by +using one or more ``-e``, even overriding those already defined by the +developer with a Dockefile ``ENV``:: + + $ docker run -e "deep=purple" -rm ubuntu /bin/bash -c export + declare -x HOME="/" + declare -x HOSTNAME="85bc26a0e200" + declare -x OLDPWD + declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + declare -x PWD="/" + declare -x SHLVL="1" + declare -x container="lxc" + declare -x deep="purple" + +Similarly the operator can set the **hostname** with ``-h``. 
+ +``-link name:alias`` also sets environment variables, using the +*alias* string to define environment variables within the container +that give the IP and PORT information for connecting to the service +container. Let's imagine we have a container running Redis:: + + # Start the service container, named redis-name + $ docker run -d -name redis-name dockerfiles/redis + 4241164edf6f5aca5b0e9e4c9eccd899b0b8080c64c0cd26efe02166c73208f3 + + # The redis-name container exposed port 6379 + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 4241164edf6f dockerfiles/redis:latest /redis-stable/src/re 5 seconds ago Up 4 seconds 6379/tcp redis-name + + # Note that there are no public ports exposed since we didn't use -p or -P + $ docker port 4241164edf6f 6379 + 2014/01/25 00:55:38 Error: No public port '6379' published for 4241164edf6f + + +Yet we can get information about the redis container's exposed ports with ``-link``. Choose an alias that will form a valid environment variable! 
+ +:: + + $ docker run -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c export + declare -x HOME="/" + declare -x HOSTNAME="acda7f7b1cdc" + declare -x OLDPWD + declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + declare -x PWD="/" + declare -x REDIS_ALIAS_NAME="/distracted_wright/redis" + declare -x REDIS_ALIAS_PORT="tcp://172.17.0.32:6379" + declare -x REDIS_ALIAS_PORT_6379_TCP="tcp://172.17.0.32:6379" + declare -x REDIS_ALIAS_PORT_6379_TCP_ADDR="172.17.0.32" + declare -x REDIS_ALIAS_PORT_6379_TCP_PORT="6379" + declare -x REDIS_ALIAS_PORT_6379_TCP_PROTO="tcp" + declare -x SHLVL="1" + declare -x container="lxc" + +And we can use that information to connect from another container as a client:: + + $ docker run -i -t -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT' + 172.17.0.32:6379> + +VOLUME (Shared Filesystems) +........................... + +:: + + -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. + If "container-dir" is missing, then docker creates a new volume. + -volumes-from="": Mount all volumes from the given container(s) + +The volumes commands are complex enough to have their own +documentation in section :ref:`volume_def`. A developer can define one +or more VOLUMEs associated with an image, but only the operator can +give access from one container to another (or from a container to a +volume mounted on the host). + +USER +.... + +:: + + -u="": Username or UID + +WORKDIR +....... 
+ +:: + + -w="": Working directory inside the container + +Performance +=========== + +The operator can also adjust the performance parameters of the container:: + + -c=0 : CPU shares (relative weight) + -m="": Memory limit (format: , where unit = b, k, m or g) + + -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" + -privileged=false: Give extended privileges to this container + From a1851a6d3e69d67bfc4e6bfdec85ba58293351b9 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Tue, 28 Jan 2014 09:56:51 +0100 Subject: [PATCH 06/33] btrfs: Add comment to Put() Document why we don't need to do anything in Put(). Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- graphdriver/btrfs/btrfs.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/graphdriver/btrfs/btrfs.go b/graphdriver/btrfs/btrfs.go index 52ed66b174..e8dc6bd0e9 100644 --- a/graphdriver/btrfs/btrfs.go +++ b/graphdriver/btrfs/btrfs.go @@ -206,6 +206,8 @@ func (d *Driver) Get(id string) (string, error) { } func (d *Driver) Put(id string) { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. } func (d *Driver) Exists(id string) bool { From db250f709ad5bcee313710d34e0b6ef02abdc326 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Fri, 24 Jan 2014 22:22:53 -0600 Subject: [PATCH 07/33] network: add publicly mapped ports to FORWARD table Allow publicly mapped ports to be made public beyond the host. This is needed for distros like Fedora and RHEL which have a reject all rule at the end of their FORWARD table. 
Docker-DCO-1.1-Signed-off-by: Josh Poimboeuf (github: jpoimboe) --- pkg/iptables/iptables.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go index 0438bcbd88..2df93657ac 100644 --- a/pkg/iptables/iptables.go +++ b/pkg/iptables/iptables.go @@ -73,6 +73,23 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str } else if len(output) != 0 { return fmt.Errorf("Error iptables forward: %s", output) } + + fAction := action + if fAction == Add { + fAction = "-I" + } + if output, err := Raw(string(fAction), "FORWARD", + "!", "-i", c.Bridge, + "-o", c.Bridge, + "-p", proto, + "-d", daddr, + "--dport", strconv.Itoa(port), + "-j", "ACCEPT"); err != nil { + return err + } else if len(output) != 0 { + return fmt.Errorf("Error iptables forward: %s", output) + } + return nil } From 99756ef11fc2a3ae821bb412607e7e9b322f278a Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 28 Jan 2014 15:42:46 -0800 Subject: [PATCH 08/33] Initial move of port mapper code into sub pkg Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- network.go | 166 +++++------------------------ networkdriver/portmapper/mapper.go | 143 +++++++++++++++++++++++++ 2 files changed, 170 insertions(+), 139 deletions(-) create mode 100644 networkdriver/portmapper/mapper.go diff --git a/network.go b/network.go index 250f7b594f..05ff005ee6 100644 --- a/network.go +++ b/network.go @@ -5,9 +5,9 @@ import ( "github.com/dotcloud/docker/networkdriver" "github.com/dotcloud/docker/networkdriver/ipallocator" "github.com/dotcloud/docker/networkdriver/portallocator" + "github.com/dotcloud/docker/networkdriver/portmapper" "github.com/dotcloud/docker/pkg/iptables" "github.com/dotcloud/docker/pkg/netlink" - "github.com/dotcloud/docker/proxy" "github.com/dotcloud/docker/utils" "io/ioutil" "log" @@ -159,129 +159,6 @@ func getIfaceAddr(name string) (net.Addr, error) { return addrs4[0], nil } -// Port mapper 
takes care of mapping external ports to containers by setting -// up iptables rules. -// It keeps track of all mappings and is able to unmap at will -type PortMapper struct { - tcpMapping map[string]*net.TCPAddr - tcpProxies map[string]proxy.Proxy - udpMapping map[string]*net.UDPAddr - udpProxies map[string]proxy.Proxy - - iptables *iptables.Chain - defaultIp net.IP - proxyFactoryFunc func(net.Addr, net.Addr) (proxy.Proxy, error) -} - -func (mapper *PortMapper) Map(ip net.IP, port int, backendAddr net.Addr) error { - - if _, isTCP := backendAddr.(*net.TCPAddr); isTCP { - mapKey := (&net.TCPAddr{Port: port, IP: ip}).String() - if _, exists := mapper.tcpProxies[mapKey]; exists { - return fmt.Errorf("TCP Port %s is already in use", mapKey) - } - backendPort := backendAddr.(*net.TCPAddr).Port - backendIP := backendAddr.(*net.TCPAddr).IP - if mapper.iptables != nil { - if err := mapper.iptables.Forward(iptables.Add, ip, port, "tcp", backendIP.String(), backendPort); err != nil { - return err - } - } - mapper.tcpMapping[mapKey] = backendAddr.(*net.TCPAddr) - proxy, err := mapper.proxyFactoryFunc(&net.TCPAddr{IP: ip, Port: port}, backendAddr) - if err != nil { - mapper.Unmap(ip, port, "tcp") - return err - } - mapper.tcpProxies[mapKey] = proxy - go proxy.Run() - } else { - mapKey := (&net.UDPAddr{Port: port, IP: ip}).String() - if _, exists := mapper.udpProxies[mapKey]; exists { - return fmt.Errorf("UDP: Port %s is already in use", mapKey) - } - backendPort := backendAddr.(*net.UDPAddr).Port - backendIP := backendAddr.(*net.UDPAddr).IP - if mapper.iptables != nil { - if err := mapper.iptables.Forward(iptables.Add, ip, port, "udp", backendIP.String(), backendPort); err != nil { - return err - } - } - mapper.udpMapping[mapKey] = backendAddr.(*net.UDPAddr) - proxy, err := mapper.proxyFactoryFunc(&net.UDPAddr{IP: ip, Port: port}, backendAddr) - if err != nil { - mapper.Unmap(ip, port, "udp") - return err - } - mapper.udpProxies[mapKey] = proxy - go proxy.Run() - } - return 
nil -} - -func (mapper *PortMapper) Unmap(ip net.IP, port int, proto string) error { - if proto == "tcp" { - mapKey := (&net.TCPAddr{Port: port, IP: ip}).String() - backendAddr, ok := mapper.tcpMapping[mapKey] - if !ok { - return fmt.Errorf("Port tcp/%s is not mapped", mapKey) - } - if proxy, exists := mapper.tcpProxies[mapKey]; exists { - proxy.Close() - delete(mapper.tcpProxies, mapKey) - } - if mapper.iptables != nil { - if err := mapper.iptables.Forward(iptables.Delete, ip, port, proto, backendAddr.IP.String(), backendAddr.Port); err != nil { - return err - } - } - delete(mapper.tcpMapping, mapKey) - } else { - mapKey := (&net.UDPAddr{Port: port, IP: ip}).String() - backendAddr, ok := mapper.udpMapping[mapKey] - if !ok { - return fmt.Errorf("Port udp/%s is not mapped", mapKey) - } - if proxy, exists := mapper.udpProxies[mapKey]; exists { - proxy.Close() - delete(mapper.udpProxies, mapKey) - } - if mapper.iptables != nil { - if err := mapper.iptables.Forward(iptables.Delete, ip, port, proto, backendAddr.IP.String(), backendAddr.Port); err != nil { - return err - } - } - delete(mapper.udpMapping, mapKey) - } - return nil -} - -func newPortMapper(config *DaemonConfig) (*PortMapper, error) { - // We can always try removing the iptables - if err := iptables.RemoveExistingChain("DOCKER"); err != nil { - return nil, err - } - var chain *iptables.Chain - if config.EnableIptables { - var err error - chain, err = iptables.NewChain("DOCKER", config.BridgeIface) - if err != nil { - return nil, fmt.Errorf("Failed to create DOCKER chain: %s", err) - } - } - - mapper := &PortMapper{ - tcpMapping: make(map[string]*net.TCPAddr), - tcpProxies: make(map[string]proxy.Proxy), - udpMapping: make(map[string]*net.UDPAddr), - udpProxies: make(map[string]proxy.Proxy), - iptables: chain, - defaultIp: config.DefaultIp, - proxyFactoryFunc: proxy.NewProxy, - } - return mapper, nil -} - // Network interface represents the networking stack of a container type NetworkInterface struct { IPNet 
net.IPNet @@ -299,7 +176,7 @@ func (iface *NetworkInterface) AllocatePort(port Port, binding PortBinding) (*Na return nil, fmt.Errorf("Trying to allocate port for interface %v, which is disabled", iface) // FIXME } - ip := iface.manager.portMapper.defaultIp + ip := iface.manager.defaultBindingIP if binding.HostIp != "" { ip = net.ParseIP(binding.HostIp) @@ -331,7 +208,7 @@ func (iface *NetworkInterface) AllocatePort(port Port, binding PortBinding) (*Na backend = &net.UDPAddr{IP: iface.IPNet.IP, Port: containerPort} } - if err := iface.manager.portMapper.Map(ip, extPort, backend); err != nil { + if err := portmapper.Map(backend, ip, extPort); err != nil { portallocator.ReleasePort(ip, nat.Port.Proto(), extPort) return nil, err } @@ -365,7 +242,15 @@ func (iface *NetworkInterface) Release() { } ip := net.ParseIP(nat.Binding.HostIp) utils.Debugf("Unmaping %s/%s:%s", nat.Port.Proto, ip.String(), nat.Binding.HostPort) - if err := iface.manager.portMapper.Unmap(ip, hostPort, nat.Port.Proto()); err != nil { + + var host net.Addr + if nat.Port.Proto() == "tcp" { + host = &net.TCPAddr{IP: ip, Port: hostPort} + } else { + host = &net.UDPAddr{IP: ip, Port: hostPort} + } + + if err := portmapper.Unmap(host); err != nil { log.Printf("Unable to unmap port %s: %s", nat, err) } @@ -382,12 +267,10 @@ func (iface *NetworkInterface) Release() { // Network Manager manages a set of network interfaces // Only *one* manager per host machine should be used type NetworkManager struct { - bridgeIface string - bridgeNetwork *net.IPNet - - portMapper *PortMapper - - disabled bool + bridgeIface string + bridgeNetwork *net.IPNet + defaultBindingIP net.IP + disabled bool } // Allocate a network interface @@ -508,16 +391,21 @@ func newNetworkManager(config *DaemonConfig) (*NetworkManager, error) { } } - portMapper, err := newPortMapper(config) - if err != nil { + // We can always try removing the iptables + if err := portmapper.RemoveIpTablesChain("DOCKER"); err != nil { return nil, err } - 
manager := &NetworkManager{ - bridgeIface: config.BridgeIface, - bridgeNetwork: network, - portMapper: portMapper, + if config.EnableIptables { + if err := portmapper.RegisterIpTablesChain("DOCKER", config.BridgeIface); err != nil { + return nil, err + } } + manager := &NetworkManager{ + bridgeIface: config.BridgeIface, + bridgeNetwork: network, + defaultBindingIP: config.DefaultIp, + } return manager, nil } diff --git a/networkdriver/portmapper/mapper.go b/networkdriver/portmapper/mapper.go new file mode 100644 index 0000000000..74e2728ab6 --- /dev/null +++ b/networkdriver/portmapper/mapper.go @@ -0,0 +1,143 @@ +package portmapper + +import ( + "errors" + "fmt" + "github.com/dotcloud/docker/pkg/iptables" + "github.com/dotcloud/docker/proxy" + "net" + "sync" +) + +type mapping struct { + proto string + userlandProxy proxy.Proxy + host net.Addr + container net.Addr +} + +var ( + chain *iptables.Chain + lock sync.Mutex + + // udp:ip:port + currentMappings = make(map[string]*mapping) +) + +var ( + ErrUnknownBackendAddressType = errors.New("unknown container address type not supported") + ErrPortMappedForIP = errors.New("port is already mapped to ip") + ErrPortNotMapped = errors.New("port is not mapped") +) + +func RegisterIpTablesChain(name, bridge string) error { + c, err := iptables.NewChain(name, bridge) + if err != nil { + return fmt.Errorf("failed to create %s chain: %s", name, err) + } + chain = c + return nil +} + +func RemoveIpTablesChain(name string) error { + if err := iptables.RemoveExistingChain(name); err != nil { + return err + } + chain = nil + return nil +} + +func Map(container net.Addr, hostIP net.IP, hostPort int) error { + lock.Lock() + defer lock.Unlock() + + var m *mapping + switch container.(type) { + case *net.TCPAddr: + m = &mapping{ + proto: "tcp", + host: &net.TCPAddr{IP: hostIP, Port: hostPort}, + container: container, + } + case *net.UDPAddr: + m = &mapping{ + proto: "udp", + host: &net.UDPAddr{IP: hostIP, Port: hostPort}, + container: 
container, + } + default: + return ErrUnknownBackendAddressType + } + + key := getKey(m.host) + if _, exists := currentMappings[key]; exists { + return ErrPortMappedForIP + } + + containerIP, containerPort := getIPAndPort(m.container) + if err := forward(iptables.Add, m.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { + return err + } + + p, err := proxy.NewProxy(m.host, m.container) + if err != nil { + // need to undo the iptables rules before we reutrn + forward(iptables.Delete, m.proto, hostIP, hostPort, containerIP.String(), containerPort) + return err + } + + m.userlandProxy = p + currentMappings[key] = m + + go p.Run() + + return nil +} + +func Unmap(host net.Addr) error { + lock.Lock() + defer lock.Unlock() + + key := getKey(host) + data, exists := currentMappings[key] + if !exists { + return ErrPortNotMapped + } + + data.userlandProxy.Close() + delete(currentMappings, key) + + containerIP, containerPort := getIPAndPort(data.container) + hostIP, hostPort := getIPAndPort(data.host) + if err := forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { + return err + } + return nil +} + +func getKey(a net.Addr) string { + switch t := a.(type) { + case *net.TCPAddr: + return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "tcp") + case *net.UDPAddr: + return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp") + } + return "" +} + +func getIPAndPort(a net.Addr) (net.IP, int) { + switch t := a.(type) { + case *net.TCPAddr: + return t.IP, t.Port + case *net.UDPAddr: + return t.IP, t.Port + } + return nil, 0 +} + +func forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error { + if chain == nil { + return nil + } + return chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort) +} From b3b12f00593d96c6d0e93f9e333ee368278e871d Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 28 Jan 2014 16:28:32 
-0800 Subject: [PATCH 09/33] Move port mapper tests out of core and into portmapper Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- network.go | 6 +- network_test.go | 72 ---------------- networkdriver/portmapper/mapper.go | 18 +--- networkdriver/portmapper/mapper_test.go | 107 ++++++++++++++++++++++++ proxy/stub_proxy.go | 22 +++++ 5 files changed, 136 insertions(+), 89 deletions(-) delete mode 100644 network_test.go create mode 100644 networkdriver/portmapper/mapper_test.go create mode 100644 proxy/stub_proxy.go diff --git a/network.go b/network.go index 05ff005ee6..c72ea12055 100644 --- a/network.go +++ b/network.go @@ -392,14 +392,16 @@ func newNetworkManager(config *DaemonConfig) (*NetworkManager, error) { } // We can always try removing the iptables - if err := portmapper.RemoveIpTablesChain("DOCKER"); err != nil { + if err := iptables.RemoveExistingChain("DOCKER"); err != nil { return nil, err } if config.EnableIptables { - if err := portmapper.RegisterIpTablesChain("DOCKER", config.BridgeIface); err != nil { + chain, err := iptables.NewChain("DOCKER", config.BridgeIface) + if err != nil { return nil, err } + portmapper.SetIptablesChain(chain) } manager := &NetworkManager{ diff --git a/network_test.go b/network_test.go deleted file mode 100644 index 6cdf50ab6e..0000000000 --- a/network_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/proxy" - "net" - "testing" -) - -type StubProxy struct { - frontendAddr *net.Addr - backendAddr *net.Addr -} - -func (proxy *StubProxy) Run() {} -func (proxy *StubProxy) Close() {} -func (proxy *StubProxy) FrontendAddr() net.Addr { return *proxy.frontendAddr } -func (proxy *StubProxy) BackendAddr() net.Addr { return *proxy.backendAddr } - -func NewStubProxy(frontendAddr, backendAddr net.Addr) (proxy.Proxy, error) { - return &StubProxy{ - frontendAddr: &frontendAddr, - backendAddr: &backendAddr, - }, nil -} - -func 
TestPortMapper(t *testing.T) { - // FIXME: is this iptables chain still used anywhere? - var chain *iptables.Chain - mapper := &PortMapper{ - tcpMapping: make(map[string]*net.TCPAddr), - tcpProxies: make(map[string]proxy.Proxy), - udpMapping: make(map[string]*net.UDPAddr), - udpProxies: make(map[string]proxy.Proxy), - iptables: chain, - defaultIp: net.IP("0.0.0.0"), - proxyFactoryFunc: NewStubProxy, - } - - dstIp1 := net.ParseIP("192.168.0.1") - dstIp2 := net.ParseIP("192.168.0.2") - srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")} - srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")} - - if err := mapper.Map(dstIp1, 80, srcAddr1); err != nil { - t.Fatalf("Failed to allocate port: %s", err) - } - - if mapper.Map(dstIp1, 80, srcAddr1) == nil { - t.Fatalf("Port is in use - mapping should have failed") - } - - if mapper.Map(dstIp1, 80, srcAddr2) == nil { - t.Fatalf("Port is in use - mapping should have failed") - } - - if err := mapper.Map(dstIp2, 80, srcAddr2); err != nil { - t.Fatalf("Failed to allocate port: %s", err) - } - - if mapper.Unmap(dstIp1, 80, "tcp") != nil { - t.Fatalf("Failed to release port") - } - - if mapper.Unmap(dstIp2, 80, "tcp") != nil { - t.Fatalf("Failed to release port") - } - - if mapper.Unmap(dstIp2, 80, "tcp") == nil { - t.Fatalf("Port already released, but no error reported") - } -} diff --git a/networkdriver/portmapper/mapper.go b/networkdriver/portmapper/mapper.go index 74e2728ab6..f052c48143 100644 --- a/networkdriver/portmapper/mapper.go +++ b/networkdriver/portmapper/mapper.go @@ -22,6 +22,7 @@ var ( // udp:ip:port currentMappings = make(map[string]*mapping) + newProxy = proxy.NewProxy ) var ( @@ -30,21 +31,8 @@ var ( ErrPortNotMapped = errors.New("port is not mapped") ) -func RegisterIpTablesChain(name, bridge string) error { - c, err := iptables.NewChain(name, bridge) - if err != nil { - return fmt.Errorf("failed to create %s chain: %s", name, err) - } +func SetIptablesChain(c *iptables.Chain) { 
chain = c - return nil -} - -func RemoveIpTablesChain(name string) error { - if err := iptables.RemoveExistingChain(name); err != nil { - return err - } - chain = nil - return nil } func Map(container net.Addr, hostIP net.IP, hostPort int) error { @@ -79,7 +67,7 @@ func Map(container net.Addr, hostIP net.IP, hostPort int) error { return err } - p, err := proxy.NewProxy(m.host, m.container) + p, err := newProxy(m.host, m.container) if err != nil { // need to undo the iptables rules before we reutrn forward(iptables.Delete, m.proto, hostIP, hostPort, containerIP.String(), containerPort) diff --git a/networkdriver/portmapper/mapper_test.go b/networkdriver/portmapper/mapper_test.go new file mode 100644 index 0000000000..05718063e3 --- /dev/null +++ b/networkdriver/portmapper/mapper_test.go @@ -0,0 +1,107 @@ +package portmapper + +import ( + "github.com/dotcloud/docker/pkg/iptables" + "github.com/dotcloud/docker/proxy" + "net" + "testing" +) + +func init() { + // override this func to mock out the proxy server + newProxy = proxy.NewStubProxy +} + +func reset() { + chain = nil + currentMappings = make(map[string]*mapping) +} + +func TestSetIptablesChain(t *testing.T) { + defer reset() + + c := &iptables.Chain{ + Name: "TEST", + Bridge: "192.168.1.1", + } + + if chain != nil { + t.Fatal("chain should be nil at init") + } + + SetIptablesChain(c) + if chain == nil { + t.Fatal("chain should not be nil after set") + } +} + +func TestMapPorts(t *testing.T) { + dstIp1 := net.ParseIP("192.168.0.1") + dstIp2 := net.ParseIP("192.168.0.2") + dstAddr1 := &net.TCPAddr{IP: dstIp1, Port: 80} + dstAddr2 := &net.TCPAddr{IP: dstIp2, Port: 80} + + srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")} + srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")} + + if err := Map(srcAddr1, dstIp1, 80); err != nil { + t.Fatalf("Failed to allocate port: %s", err) + } + + if Map(srcAddr1, dstIp1, 80) == nil { + t.Fatalf("Port is in use - mapping should have failed") + } + 
+ if Map(srcAddr2, dstIp1, 80) == nil { + t.Fatalf("Port is in use - mapping should have failed") + } + + if err := Map(srcAddr2, dstIp2, 80); err != nil { + t.Fatalf("Failed to allocate port: %s", err) + } + + if Unmap(dstAddr1) != nil { + t.Fatalf("Failed to release port") + } + + if Unmap(dstAddr2) != nil { + t.Fatalf("Failed to release port") + } + + if Unmap(dstAddr2) == nil { + t.Fatalf("Port already released, but no error reported") + } +} + +func TestGetUDPKey(t *testing.T) { + addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} + + key := getKey(addr) + + if expected := "192.168.1.5:53/udp"; key != expected { + t.Fatalf("expected key %s got %s", expected, key) + } +} + +func TestGetTCPKey(t *testing.T) { + addr := &net.TCPAddr{IP: net.ParseIP("192.168.1.5"), Port: 80} + + key := getKey(addr) + + if expected := "192.168.1.5:80/tcp"; key != expected { + t.Fatalf("expected key %s got %s", expected, key) + } +} + +func TestGetUDPIPAndPort(t *testing.T) { + addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} + + ip, port := getIPAndPort(addr) + if expected := "192.168.1.5"; ip.String() != expected { + t.Fatalf("expected ip %s got %s", expected, ip) + } + + if ep := 53; port != ep { + t.Fatalf("expected port %d got %d", ep, port) + } +} diff --git a/proxy/stub_proxy.go b/proxy/stub_proxy.go new file mode 100644 index 0000000000..7684427058 --- /dev/null +++ b/proxy/stub_proxy.go @@ -0,0 +1,22 @@ +package proxy + +import ( + "net" +) + +type StubProxy struct { + frontendAddr net.Addr + backendAddr net.Addr +} + +func (p *StubProxy) Run() {} +func (p *StubProxy) Close() {} +func (p *StubProxy) FrontendAddr() net.Addr { return p.frontendAddr } +func (p *StubProxy) BackendAddr() net.Addr { return p.backendAddr } + +func NewStubProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) { + return &StubProxy{ + frontendAddr: frontendAddr, + backendAddr: backendAddr, + }, nil +} From f3a032f27b80e8194cb7f14df848f063a56c5f26 Mon Sep 17 00:00:00 2001 
From: Andy Rothfusz Date: Tue, 28 Jan 2014 17:32:05 -0800 Subject: [PATCH 10/33] Address feedback from @jamtur01. Docker-DCO-1.1-Signed-off-by: Andy Rothfusz (github: metalivedev) --- docs/sources/articles/runmetrics.rst | 26 ++- docs/sources/reference/run.rst | 260 +++++++++++++++++---------- 2 files changed, 173 insertions(+), 113 deletions(-) diff --git a/docs/sources/articles/runmetrics.rst b/docs/sources/articles/runmetrics.rst index f7406bc5ed..afb7f82e39 100644 --- a/docs/sources/articles/runmetrics.rst +++ b/docs/sources/articles/runmetrics.rst @@ -37,8 +37,8 @@ To figure out where your control groups are mounted, you can run: .. _run_findpid: -Ennumerating Cgroups --------------------- +Enumerating Cgroups +------------------- You can look into ``/proc/cgroups`` to see the different control group subsystems known to the system, the hierarchy they belong to, and how @@ -71,7 +71,7 @@ container, take a look at ``/sys/fs/cgroup/memory/lxc//``. Metrics from Cgroups: Memory, CPU, Block IO ------------------------------------------- -For each subsystem (memory, cpu, and block i/o), you will find one or +For each subsystem (memory, CPU, and block I/O), you will find one or more pseudo-files containing statistics. Memory Metrics: ``memory.stat`` @@ -79,7 +79,7 @@ Memory Metrics: ``memory.stat`` Memory metrics are found in the "memory" cgroup. Note that the memory control group adds a little overhead, because it does very -fine-grained accounting of the memory usage on your system. Therefore, +fine-grained accounting of the memory usage on your host. Therefore, many distros chose to not enable it by default. Generally, to enable it, all you have to do is to add some kernel command-line parameters: ``cgroup_enable=memory swapaccount=1``. @@ -133,7 +133,7 @@ creation of the cgroup; this number can never decrease). cache the amount of memory used by the processes of this control group that can be associated precisely with a block on a block - device. 
When you read and write files from and to disk, this amount + device. When you read from and write to files on disk, this amount will increase. This will be the case if you use "conventional" I/O (``open``, ``read``, ``write`` syscalls) as well as mapped files (with ``mmap``). It also accounts for the memory used by ``tmpfs`` @@ -148,17 +148,11 @@ mapped_file control group. It doesn't give you information about *how much* memory is used; it rather tells you *how* it is used. -pgpgin and pgpgout - correspond to *charging events*. Each time a page is "charged" - (=added to the accounting) to a cgroup, pgpgin increases. When a - page is "uncharged" (=no longer "billed" to a cgroup), pgpgout - increases. - pgfault and pgmajfault indicate the number of times that a process of the cgroup triggered a "page fault" and a "major fault", respectively. A page fault happens when a process accesses a part of its virtual memory space - which is inexistent or protected. The former can happen if the + which is nonexistent or protected. The former can happen if the process is buggy and tries to access an invalid address (it will then be sent a ``SIGSEGV`` signal, typically killing it with the famous ``Segmentation fault`` message). The latter can happen when @@ -237,7 +231,7 @@ the processes were in direct control of the CPU (i.e. executing process code), and ``system`` is the time during which the CPU was executing system calls on behalf of those processes. -Those times are expressed in ticks of 1/100th of second. Actually, +Those times are expressed in ticks of 1/100th of a second. Actually, they are expressed in "user jiffies". There are ``USER_HZ`` *"jiffies"* per second, and on x86 systems, ``USER_HZ`` is 100. This used to map exactly to the number of scheduler "ticks" per second; but @@ -383,11 +377,11 @@ pseudo-files. (Symlinks are accepted.) 
In other words, to execute a command within the network namespace of a container, we need to: -* find out the PID of any process within the container that we want to +* Find out the PID of any process within the container that we want to investigate; -* create a symlink from ``/var/run/netns/`` to +* Create a symlink from ``/var/run/netns/`` to ``/proc//ns/net`` -* execute ``ip netns exec ....`` +* Execute ``ip netns exec ....`` Please review :ref:`run_findpid` to learn how to find the cgroup of a pprocess running in the container of which you want to measure network diff --git a/docs/sources/reference/run.rst b/docs/sources/reference/run.rst index 7505b7c02f..307edace00 100644 --- a/docs/sources/reference/run.rst +++ b/docs/sources/reference/run.rst @@ -21,6 +21,7 @@ Every one of the :ref:`example_list` shows running containers, and so here we try to give more in-depth guidance. .. contents:: Table of Contents + :depth: 2 .. _run_running: @@ -37,24 +38,33 @@ To learn how to interpret the types of ``[OPTIONS]``, see The list of ``[OPTIONS]`` breaks down into two groups: -* options that define the runtime behavior or environment, and -* options that override image defaults. +1. Settings exclusive to operators, including: -Since image defaults usually get set in :ref:`Dockerfiles -` (though they could also be set at :ref:`cli_commit` -time too), we will group the runtime options here by their related -Dockerfile commands so that it is easier to see how to override image -defaults and set new behavior. + * Detached or Foreground running, + * Container Identification, + * Network settings, and + * Runtime Constraints on CPU and Memory + * Privileges and LXC Configuration -We'll start, though, with the options that are unique to ``docker -run``, the options which define the runtime behavior or the container -environment. +2. Setting shared between operators and developers, where operators + can override defaults developers set in images at build time. -.. 
note:: The runtime operator always has final control over the - behavior of a Docker container. +Together, the ``docker run [OPTIONS]`` give complete control over +runtime behavior to the operator, allowing them to override all +defaults set by the developer during ``docker build`` and nearly all +the defaults set by the Docker runtime itself. -Detached or Foreground -====================== +Operator Exclusive Options +========================== + +Only the operator (the person executing ``docker run``) can set the +following options. + +.. contents:: + :local: + +Detached vs Foreground +---------------------- When starting a Docker container, you must first decide if you want to run the container in the background in a "detached" mode or in the @@ -65,7 +75,7 @@ default foreground mode:: Detached (-d) ............. -In detached mode (``-d=true`` or just ``-d``), all IO should be done +In detached mode (``-d=true`` or just ``-d``), all I/O should be done through network connections or shared volumes because the container is no longer listening to the commandline where you executed ``docker run``. You can reattach to a detached container with ``docker`` @@ -82,22 +92,68 @@ error. It can even pretend to be a TTY (this is what most commandline executables expect) and pass along signals. All of that is configurable:: - -a=[] : Attach to stdin, stdout and/or stderr + -a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr`` -t=false : Allocate a pseudo-tty -sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) - -i=false : Keep stdin open even if not attached + -i=false : Keep STDIN open even if not attached If you do not specify ``-a`` then Docker will `attach everything (stdin,stdout,stderr) -`_. You -can specify which of the three standard streams (stdin, stdout, -stderr) you'd like to connect between your instead, as in:: +`_. 
You +can specify to which of the three standard streams (``stdin``, ``stdout``, +``stderr``) you'd like to connect instead, as in:: docker run -a stdin -a stdout -i -t ubuntu /bin/bash For interactive processes (like a shell) you will typically want a tty -as well as persistent standard in, so you'll use ``-i -t`` together in -most interactive cases. +as well as persistent standard input (``stdin``), so you'll use ``-i +-t`` together in most interactive cases. + +Container Identification +------------------------ + +Name (-name) +............ + +The operator can identify a container in three ways: + +* UUID long identifier ("f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778") +* UUID short identifier ("f78375b1c487") +* Name ("evil_ptolemy") + +The UUID identifiers come from the Docker daemon, and if you do not +assign a name to the container with ``-name`` then the daemon will +also generate a random string name too. The name can become a handy +way to add meaning to a container since you can use this name when +defining :ref:`links ` (or any other place +you need to identify a container). This works for both background and +foreground Docker containers. + +PID Equivalent +.............. + +And finally, to help with automation, you can have Docker write the +container ID out to a file of your choosing. This is similar to how +some programs might write out their process ID to a file (you've seen +them as PID files):: + + -cidfile="": Write the container ID to the file + +Network Settings +---------------- + +:: + -n=true : Enable networking for this container + -dns=[] : Set custom dns servers for the container + +By default, all containers have networking enabled and they can make +any outgoing connections. The operator can completely disable +networking with ``docker run -n`` which disables all incoming and outgoing +networking. In cases like this, you would perform I/O through files or +STDIN/STDOUT only. 
+ +Your container will use the same DNS servers as the host by default, +but you can override this with ``-dns``. Clean Up (-rm) -------------- @@ -112,57 +168,84 @@ the container exits**, you can add the ``-rm`` flag:: -rm=false: Automatically remove the container when it exits (incompatible with -d) -Name (-name) -============ -The operator can identify a container in three ways: +Runtime Constraints on CPU and Memory +------------------------------------- -* UUID long identifier ("f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778") -* UUID short identifier ("f78375b1c487") -* name ("evil_ptolemy") +The operator can also adjust the performance parameters of the container:: -The UUID identifiers come from the Docker daemon, and if you do not -assign a name to the container with ``-name`` then the daemon will -also generate a random string name too. The name can become a handy -way to add meaning to a container since you can use this name when -defining :ref:`links ` (or any other place -you need to identify a container). This works for both background and -foreground Docker containers. + -m="": Memory limit (format: , where unit = b, k, m or g) + -c=0 : CPU shares (relative weight) -PID Equivalent -============== +The operator can constrain the memory available to a container easily +with ``docker run -m``. If the host supports swap memory, then the +``-m`` memory setting can be larger than physical RAM. -And finally, to help with automation, you can have Docker write the -container id out to a file of your choosing. This is similar to how -some programs might write out their process ID to a file (you've seen -them as .pid files):: +Similarly the operator can increase the priority of this container +with the ``-c`` option. By default, all containers run at the same +priority and get the same proportion of CPU cycles, but you can tell +the kernel to give more shares of CPU time to one or more containers +when you start them via Docker. 
- -cidfile="": Write the container ID to the file +Runtime Privilege and LXC Configuration +--------------------------------------- -Overriding Dockerfile Image Defaults -==================================== +:: + + -privileged=false: Give extended privileges to this container + -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" + +By default, Docker containers are "unprivileged" and cannot, for +example, run a Docker daemon inside a Docker container. This is +because by default a container is not allowed to access any devices, +but a "privileged" container is given access to all devices (see +lxc-template.go_ and documentation on `cgroups devices +`_). + +When the operator executes ``docker run -privileged``, Docker will +enable to access to all devices on the host as well as set some +configuration in AppArmor to allow the container nearly all the same +access to the host as processes running outside containers on the +host. Additional information about running with ``-privileged`` is +available on the `Docker Blog +`_. + +An operator can also specify LXC options using one or more +``-lxc-conf`` parameters. These can be new parameters or override +existing parameters from the lxc-template.go_. Note that in the +future, a given host's Docker daemon may not use LXC, so this is an +implementation-specific configuration meant for operators already +familiar with using LXC directly. + +.. _lxc-template.go: https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go + + +Overriding ``Dockerfile`` Image Defaults +======================================== When a developer builds an image from a :ref:`Dockerfile ` or when she commits it, the developer can set a number of default parameters that take effect when the image starts up as a container. -Four of the Dockerfile commands cannot be overridden at runtime: +Four of the ``Dockerfile`` commands cannot be overridden at runtime: ``FROM, MAINTAINER, RUN``, and ``ADD``. 
Everything else has a corresponding override in ``docker run``. We'll go through what the -developer might have set in each Dockerfile instruction and how the +developer might have set in each ``Dockerfile`` instruction and how the operator can override that setting. +.. contents:: + :local: -CMD -... +CMD (Default Command or Options) +-------------------------------- -Remember the optional ``COMMAND`` in the Docker commandline:: +Recall the optional ``COMMAND`` in the Docker commandline:: docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...] This command is optional because the person who created the ``IMAGE`` -may have already provided a default ``COMMAND`` using the Dockerfile +may have already provided a default ``COMMAND`` using the ``Dockerfile`` ``CMD``. As the operator (the person running a container from the image), you can override that ``CMD`` just by specifying a new ``COMMAND``. @@ -171,22 +254,22 @@ If the image also specifies an ``ENTRYPOINT`` then the ``CMD`` or ``COMMAND`` get appended as arguments to the ``ENTRYPOINT``. -ENTRYPOINT -.......... +ENTRYPOINT (Default Command to Execute at Runtime +------------------------------------------------- :: -entrypoint="": Overwrite the default entrypoint set by the image -The ENTRYPOINT of an image is similar to a COMMAND because it +The ENTRYPOINT of an image is similar to a ``COMMAND`` because it specifies what executable to run when the container starts, but it is -(purposely) more difficult to override. The ENTRYPOINT gives a +(purposely) more difficult to override. The ``ENTRYPOINT`` gives a container its default nature or behavior, so that when you set an -ENTRYPOINT you can run the container *as if it were that binary*, +``ENTRYPOINT`` you can run the container *as if it were that binary*, complete with default options, and you can pass in more options via -the COMMAND. 
But, sometimes an operator may want to run something else -inside the container, so you can override the default ENTRYPOINT at -runtime by using a string to specify the new ENTRYPOINT. Here is an +the ``COMMAND``. But, sometimes an operator may want to run something else +inside the container, so you can override the default ``ENTRYPOINT`` at +runtime by using a string to specify the new ``ENTRYPOINT``. Here is an example of how to run a shell in a container that has been set up to automatically run something else (like ``/usr/bin/redis-server``):: @@ -198,16 +281,14 @@ or two examples of how to pass more parameters to that ENTRYPOINT:: docker run -i -t -entrypoint /usr/bin/redis-cli example/redis --help -EXPOSE (``run`` Networking Options) -................................... +EXPOSE (Incoming Ports) +----------------------- -The *Dockerfile* doesn't give much control over networking, only -providing the EXPOSE instruction to give a hint to the operator about -what incoming ports might provide services. At runtime, however, -Docker provides a number of ``run`` options related to networking:: +The ``Dockerfile`` doesn't give much control over networking, only +providing the ``EXPOSE`` instruction to give a hint to the operator +about what incoming ports might provide services. The following +options work with or override the ``Dockerfile``'s exposed defaults:: - -n=true : Enable networking for this container - -dns=[] : Set custom dns servers for the container -expose=[]: Expose a port from the container without publishing it to your host -P=false : Publish all exposed ports to the host interfaces @@ -217,25 +298,16 @@ Docker provides a number of ``run`` options related to networking:: (use 'docker port' to see the actual mapping) -link="" : Add link to another container (name:alias) -By default, all containers have networking enabled and they can make -any outgoing connections. 
The operator can completely disable -networking with ``run -n`` which disables all incoming and outgoing -networking. In cases like this, you would perform IO through files or -stdin/stdout only. - -Your container will use the same DNS servers as the host by default, -but you can override this with ``-dns``. - As mentioned previously, ``EXPOSE`` (and ``-expose``) make a port available **in** a container for incoming connections. The port number on the inside of the container (where the service listens) does not need to be the same number as the port exposed on the outside of the container (where clients connect), so inside the container you might have an HTTP service listening on port 80 (and so you ``EXPOSE 80`` in -the Dockerfile), but outside the container the port might be 42800. +the ``Dockerfile``), but outside the container the port might be 42800. To help a new client container reach the server container's internal -port operator ``-expose'd`` by the operator or ``EXPOSE'd`` by the +port operator ``-expose``'d by the operator or ``EXPOSE``'d by the developer, the operator has three choices: start the server container with ``-P`` or ``-p,`` or start the client container with ``-link``. @@ -250,10 +322,10 @@ networking interface. Docker will set some environment variables in the client container to help indicate which interface and port to use. ENV (Environment Variables) -........................... +--------------------------- The operator can **set any environment variable** in the container by -using one or more ``-e``, even overriding those already defined by the +using one or more ``-e`` flags, even overriding those already defined by the developer with a Dockefile ``ENV``:: $ docker run -e "deep=purple" -rm ubuntu /bin/bash -c export @@ -287,7 +359,9 @@ container. 
Let's imagine we have a container running Redis:: 2014/01/25 00:55:38 Error: No public port '6379' published for 4241164edf6f -Yet we can get information about the redis container's exposed ports with ``-link``. Choose an alias that will form a valid environment variable! +Yet we can get information about the Redis container's exposed ports +with ``-link``. Choose an alias that will form a valid environment +variable! :: @@ -312,7 +386,7 @@ And we can use that information to connect from another container as a client:: 172.17.0.32:6379> VOLUME (Shared Filesystems) -........................... +--------------------------- :: @@ -322,32 +396,24 @@ VOLUME (Shared Filesystems) The volumes commands are complex enough to have their own documentation in section :ref:`volume_def`. A developer can define one -or more VOLUMEs associated with an image, but only the operator can +or more ``VOLUME``\s associated with an image, but only the operator can give access from one container to another (or from a container to a volume mounted on the host). USER -.... +---- -:: +The default user within a container is ``root`` (id = 0), but if the +developer created additional users, those are accessible too. The +developer can set a default user to run the first process with the +``Dockerfile USER`` command, but the operator can override it :: -u="": Username or UID WORKDIR -....... +------- -:: +The default working directory for running binaries within a container is the root directory (``/``), but the developer can set a different default with the ``Dockerfile WORKDIR`` command. 
The operator can override this with:: -w="": Working directory inside the container -Performance -=========== - -The operator can also adjust the performance parameters of the container:: - - -c=0 : CPU shares (relative weight) - -m="": Memory limit (format: , where unit = b, k, m or g) - - -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" - -privileged=false: Give extended privileges to this container - From 6cfe778c2cb6359b47cf646dc12824b58cc8d887 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Wed, 29 Jan 2014 00:10:39 -0700 Subject: [PATCH 11/33] Make get.docker.io install directions slightly better by telling user exactly how to add themselves to the "docker" group As a bonus, if the user has run this script the way we recommend (ie, without "sudo" or "su", run as their user), we can actually give them the exact command they'll need with their correct username substituted appropriately. Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/install.sh | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/hack/install.sh b/hack/install.sh index 02d812f388..65e34f9659 100755 --- a/hack/install.sh +++ b/hack/install.sh @@ -37,8 +37,10 @@ if command_exists docker || command_exists lxc-docker; then ( set -x; sleep 20 ) fi +user="$(id -un 2>/dev/null || true)" + sh_c='sh -c' -if [ "$(whoami 2>/dev/null || true)" != 'root' ]; then +if [ "$user" != 'root' ]; then if command_exists sudo; then sh_c='sudo sh -c' elif command_exists su; then @@ -124,6 +126,16 @@ case "$lsb_dist" in $sh_c 'docker run busybox echo "Docker has been successfully installed!"' ) || true fi + your_user=your-user + [ "$user" != 'root' ] && your_user="$user" + echo + echo 'If you would like to use Docker as a non-root user, you should now consider' + echo 'adding your user to the "docker" group with something like:' + echo + echo ' sudo usermod -aG docker' $your_user + echo + echo 'Remember that you will have to log out and back 
in for this to take effect!' + echo exit 0 ;; From f09a78cd219b24d4308034c8dd13410cfe5fbec7 Mon Sep 17 00:00:00 2001 From: Lokesh Mandvekar Date: Thu, 9 Jan 2014 12:09:25 -0500 Subject: [PATCH 12/33] ExecStartPre commands updated Docker-DCO-1.1-Signed-off-by: Lokesh Mandvekar (github: lsm5) systemd service no longer does '/bin/mount/ --make-rprivate /'. Core issue fixed by Alex Larsson (commit 157d99a). ip forwarding enabled. --- contrib/init/systemd/docker.service | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/contrib/init/systemd/docker.service b/contrib/init/systemd/docker.service index aae7b6daf9..dc7cc450dd 100644 --- a/contrib/init/systemd/docker.service +++ b/contrib/init/systemd/docker.service @@ -1,11 +1,13 @@ [Unit] -Description=Docker Application Container Engine +Description=Docker Application Container Engine Documentation=http://docs.docker.io After=network.target [Service] -ExecStartPre=/bin/mount --make-rprivate / +Type=simple +ExecStartPre=/usr/sbin/sysctl -w net.ipv4.ip_forward=1 net.ipv6.conf.all.forwarding=1 ExecStart=/usr/bin/docker -d +Restart=on-failure [Install] WantedBy=multi-user.target From a9d0bbcfc6e45817f39f15a7c9b11305b4c04bad Mon Sep 17 00:00:00 2001 From: Lokesh Mandvekar Date: Wed, 29 Jan 2014 11:05:11 -0500 Subject: [PATCH 13/33] remove ip fowarding from systemd (fixed: commit #3801) Docker-DCO-1.1-Signed-off-by: Lokesh Mandvekar (github: lsm5) --- contrib/init/systemd/docker.service | 2 -- 1 file changed, 2 deletions(-) diff --git a/contrib/init/systemd/docker.service b/contrib/init/systemd/docker.service index dc7cc450dd..387be2eb1c 100644 --- a/contrib/init/systemd/docker.service +++ b/contrib/init/systemd/docker.service @@ -4,8 +4,6 @@ Documentation=http://docs.docker.io After=network.target [Service] -Type=simple -ExecStartPre=/usr/sbin/sysctl -w net.ipv4.ip_forward=1 net.ipv6.conf.all.forwarding=1 ExecStart=/usr/bin/docker -d Restart=on-failure From 5b82a1b726291d4aaaad797df5b29a74de28d318 Mon Sep 17 
00:00:00 2001 From: Victor Vieux Date: Thu, 30 Jan 2014 00:45:55 +0000 Subject: [PATCH 14/33] add tests Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- integration/server_test.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/integration/server_test.go b/integration/server_test.go index 2666d1d4fe..b0ad3d903b 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -114,6 +114,30 @@ func TestCreateRm(t *testing.T) { } +func TestCreateNumberHostname(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + + config, _, _, err := docker.ParseRun([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil) + if err != nil { + t.Fatal(err) + } + + createTestContainer(eng, config, t) +} + +func TestCreateNumberUsername(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + + config, _, _, err := docker.ParseRun([]string{"-u", "1002", unitTestImageID, "echo test"}, nil) + if err != nil { + t.Fatal(err) + } + + createTestContainer(eng, config, t) +} + func TestCreateRmVolumes(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() From 187646127fa80a5ba39a53619b410eb2a13f0ffd Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 30 Jan 2014 00:56:42 +0000 Subject: [PATCH 15/33] fix convertion issues Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- container.go | 54 ++++++++++++++++++++++++++++++++++++++++++++++ engine/env.go | 18 ---------------- engine/env_test.go | 26 ---------------------- engine/job.go | 4 ---- server.go | 18 +++++----------- 5 files changed, 59 insertions(+), 61 deletions(-) diff --git a/container.go b/container.go index c5df1f4b58..c98982b111 100644 --- a/container.go +++ b/container.go @@ -104,6 +104,44 @@ type Config struct { NetworkDisabled bool } +func ContainerConfigFromJob(job *engine.Job) *Config { + var config Config + config.Hostname = job.Getenv("Hostname") + config.Domainname = 
job.Getenv("Domainname") + config.User = job.Getenv("User") + config.Memory = job.GetenvInt64("Memory") + config.MemorySwap = job.GetenvInt64("MemorySwap") + config.CpuShares = job.GetenvInt64("CpuShares") + config.AttachStdin = job.GetenvBool("AttachStdin") + config.AttachStdout = job.GetenvBool("AttachStdout") + config.AttachStderr = job.GetenvBool("AttachStderr") + if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil { + config.PortSpecs = PortSpecs + } + job.GetenvJson("ExposedPorts", &config.ExposedPorts) + config.Tty = job.GetenvBool("Tty") + config.OpenStdin = job.GetenvBool("OpenStdin") + config.StdinOnce = job.GetenvBool("StdinOnce") + if Env := job.GetenvList("Env"); Env != nil { + config.Env = Env + } + if Cmd := job.GetenvList("Cmd"); Cmd != nil { + config.Cmd = Cmd + } + if Dns := job.GetenvList("Dns"); Dns != nil { + config.Dns = Dns + } + config.Image = job.Getenv("Image") + job.GetenvJson("Volumes", &config.Volumes) + config.VolumesFrom = job.Getenv("VolumesFrom") + config.WorkingDir = job.Getenv("WorkingDir") + if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { + config.Entrypoint = Entrypoint + } + config.NetworkDisabled = job.GetenvBool("NetworkDisabled") + return &config +} + type HostConfig struct { Binds []string ContainerIDFile string @@ -114,6 +152,22 @@ type HostConfig struct { PublishAllPorts bool } +func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { + var hostConfig HostConfig + if Binds := job.GetenvList("Binds"); Binds != nil { + hostConfig.Binds = Binds + } + hostConfig.ContainerIDFile = job.Getenv("ContainerIDFile") + job.GetenvJson("LxcConf", &hostConfig.LxcConf) + hostConfig.Privileged = job.GetenvBool("Privileged") + job.GetenvJson("PortBindings", &hostConfig.PortBindings) + if Links := job.GetenvList("Links"); Links != nil { + hostConfig.Links = Links + } + hostConfig.PublishAllPorts = job.GetenvBool("PublishAllPorts") + return &hostConfig +} + type BindMap struct { SrcPath string DstPath 
string diff --git a/engine/env.go b/engine/env.go index f93555a40b..e000fe26b1 100644 --- a/engine/env.go +++ b/engine/env.go @@ -191,24 +191,6 @@ func (env *Env) WriteTo(dst io.Writer) (n int64, err error) { return 0, env.Encode(dst) } -func (env *Env) Export(dst interface{}) (err error) { - defer func() { - if err != nil { - err = fmt.Errorf("ExportEnv %s", err) - } - }() - var buf bytes.Buffer - // step 1: encode/marshal the env to an intermediary json representation - if err := env.Encode(&buf); err != nil { - return err - } - // step 2: decode/unmarshal the intermediary json into the destination object - if err := json.NewDecoder(&buf).Decode(dst); err != nil { - return err - } - return nil -} - func (env *Env) Import(src interface{}) (err error) { defer func() { if err != nil { diff --git a/engine/env_test.go b/engine/env_test.go index 24c5992dd0..419c47491e 100644 --- a/engine/env_test.go +++ b/engine/env_test.go @@ -84,32 +84,6 @@ func TestSetenvList(t *testing.T) { } } -func TestImportEnv(t *testing.T) { - type dummy struct { - DummyInt int - DummyStringArray []string - } - - job := mkJob(t, "dummy") - if err := job.ImportEnv(&dummy{42, []string{"foo", "bar"}}); err != nil { - t.Fatal(err) - } - - dmy := dummy{} - if err := job.ExportEnv(&dmy); err != nil { - t.Fatal(err) - } - - if dmy.DummyInt != 42 { - t.Fatalf("Expected 42, got %d", dmy.DummyInt) - } - - if len(dmy.DummyStringArray) != 2 || dmy.DummyStringArray[0] != "foo" || dmy.DummyStringArray[1] != "bar" { - t.Fatalf("Expected {foo, bar}, got %v", dmy.DummyStringArray) - } - -} - func TestEnviron(t *testing.T) { job := mkJob(t, "dummy") job.Setenv("foo", "bar") diff --git a/engine/job.go b/engine/job.go index 179b2ebdda..181eaa80c7 100644 --- a/engine/job.go +++ b/engine/job.go @@ -164,10 +164,6 @@ func (job *Job) EncodeEnv(dst io.Writer) error { return job.env.Encode(dst) } -func (job *Job) ExportEnv(dst interface{}) (err error) { - return job.env.Export(dst) -} - func (job *Job) ImportEnv(src 
interface{}) (err error) { return job.env.Import(src) } diff --git a/server.go b/server.go index a6731842cc..5a3b999c43 100644 --- a/server.go +++ b/server.go @@ -1742,11 +1742,7 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { job.Printf("Usage: %s", job.Name) return engine.StatusErr } - var config Config - if err := job.ExportEnv(&config); err != nil { - job.Error(err) - return engine.StatusErr - } + config := ContainerConfigFromJob(job) if config.Memory != 0 && config.Memory < 524288 { job.Errorf("Minimum memory limit allowed is 512k") return engine.StatusErr @@ -1769,7 +1765,7 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { config.Dns = defaultDns } - container, buildWarnings, err := srv.runtime.Create(&config, name) + container, buildWarnings, err := srv.runtime.Create(config, name) if err != nil { if srv.runtime.graph.IsNotExist(err) { _, tag := utils.ParseRepositoryTag(config.Image) @@ -2196,11 +2192,7 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status { } // If no environment was set, then no hostconfig was passed. if len(job.Environ()) > 0 { - var hostConfig HostConfig - if err := job.ExportEnv(&hostConfig); err != nil { - job.Error(err) - return engine.StatusErr - } + hostConfig := ContainerHostConfigFromJob(job) // Validate the HostConfig binds. Make sure that: // 1) the source of a bind mount isn't / // The bind mount "/:/foo" isn't allowed. 
@@ -2227,11 +2219,11 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status { } } // Register any links from the host config before starting the container - if err := srv.RegisterLinks(container, &hostConfig); err != nil { + if err := srv.RegisterLinks(container, hostConfig); err != nil { job.Error(err) return engine.StatusErr } - container.hostConfig = &hostConfig + container.hostConfig = hostConfig container.ToDisk() } if err := container.Start(); err != nil { From 4dab34c2045b7bcc735227867126f0796cbb09c1 Mon Sep 17 00:00:00 2001 From: Brandon Philips Date: Wed, 29 Jan 2014 17:41:21 -0800 Subject: [PATCH 16/33] contrib: systemd: make socket-activation Restart=always Do as was done to f09a78cd219b24d4308034c8dd13410cfe5fbec7 in the socket-activation example. Docker-DCO-1.1-Signed-off-by: Brandon Philips (github: philips) --- contrib/init/systemd/socket-activation/docker.service | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/init/systemd/socket-activation/docker.service b/contrib/init/systemd/socket-activation/docker.service index 4ab92dfef8..c795f9c3b4 100644 --- a/contrib/init/systemd/socket-activation/docker.service +++ b/contrib/init/systemd/socket-activation/docker.service @@ -5,6 +5,7 @@ After=network.target [Service] ExecStart=/usr/bin/docker -d -H fd:// +Restart=on-failure [Install] WantedBy=multi-user.target From 7d95ce6ddda89a392b94c6dcfcb7774b2a19cf1a Mon Sep 17 00:00:00 2001 From: Piergiuliano Bossi Date: Wed, 29 Jan 2014 23:07:23 -0500 Subject: [PATCH 17/33] Fix typo Docker-DCO-1.1-Signed-off-by: Piergiuliano Bossi (github: thinkingbox) --- docs/sources/installation/ubuntulinux.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index d5e4a248ba..4f4c89386f 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -224,7 +224,7 @@ Docker and UFW ^^^^^^^^^^^^^^ Docker uses a 
bridge to manage container networking. By default, UFW drops all -`forwarding` traffic. As a result will you need to enable UFW forwarding: +`forwarding` traffic. As a result you will need to enable UFW forwarding: .. code-block:: bash From 626a2e1112b6e802415f80ff7a3682296636f55a Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 30 Jan 2014 20:53:27 +1000 Subject: [PATCH 18/33] Add troubleshooting for missing cgroups on mint 16 #3602 Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/installation/ubuntulinux.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index 4f4c89386f..3d6ee6415d 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -217,6 +217,15 @@ To install the latest version of docker, use the standard ``apt-get`` method: # install the latest sudo apt-get install lxc-docker +Troubleshooting +^^^^^^^^^^^^^^^ + +On Linux Mint, the ``cgroups-lite`` package is not installed by default. +Before Docker will work correctly, you will need to install this via: + +.. code-block:: bash + + sudo apt-get update && sudo apt-get install cgroups-lite .. _ufw: From cc382ec62848855f6fe7302fe7a37ac62af35a9c Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Thu, 30 Jan 2014 11:03:25 -0600 Subject: [PATCH 19/33] network: insert masq rule This fixes IP masquerading on systems with reject rules at the end of the POSTROUTING table, by inserting the rule at the beginning of the table instead of adding it at the end. 
Docker-DCO-1.1-Signed-off-by: Josh Poimboeuf (github: jpoimboe) --- network.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network.go b/network.go index c72ea12055..d9771ac008 100644 --- a/network.go +++ b/network.go @@ -327,7 +327,7 @@ func newNetworkManager(config *DaemonConfig) (*NetworkManager, error) { natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), "!", "-d", addr.String(), "-j", "MASQUERADE"} if !iptables.Exists(natArgs...) { - if output, err := iptables.Raw(append([]string{"-A"}, natArgs...)...); err != nil { + if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil { return nil, fmt.Errorf("Unable to enable network bridge NAT: %s", err) } else if len(output) != 0 { return nil, fmt.Errorf("Error iptables postrouting: %s", output) From 238dba831a74c89fe8f5aabc836e6b75da577c97 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 30 Jan 2014 11:51:25 -0700 Subject: [PATCH 20/33] Make unclejack the official Vagrantfile maintainer Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 4a6c0ec22c..895fba563a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6,4 +6,4 @@ Michael Crosby (@crosbymichael) api.go: Victor Vieux (@vieux) Dockerfile: Tianon Gravi (@tianon) Makefile: Tianon Gravi (@tianon) -Vagrantfile: Daniel Mizyrycki (@mzdaniel) +Vagrantfile: Cristian Staretu (@unclejack) From c8d1596902704076c8d85a30e80f90f8efb3e175 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 30 Jan 2014 12:27:45 -0700 Subject: [PATCH 21/33] Fix fun Travis DCO check YAML parsing issues (especially with commit messages that start with any kind of whitespace, like this one intentionally does) Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/travis/dco.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hack/travis/dco.py b/hack/travis/dco.py index 
d80d528f9a..f873940815 100755 --- a/hack/travis/dco.py +++ b/hack/travis/dco.py @@ -5,7 +5,7 @@ import yaml from env import commit_range -commit_format = '-%n hash: "%h"%n author: %aN <%aE>%n message: |%n%w(0,2,2)%B' +commit_format = '-%n hash: "%h"%n author: %aN <%aE>%n message: |%n%w(0,2,2).%B' gitlog = subprocess.check_output([ 'git', 'log', '--reverse', @@ -24,6 +24,11 @@ p = re.compile(r'^{0} ([^<]+) <([^<>@]+@[^<>]+)> \(github: (\S+)\)$'.format(re.e failed_commits = 0 for commit in commits: + commit['message'] = commit['message'][1:] + # trim off our '.' that exists just to prevent fun YAML parsing issues + # see https://github.com/dotcloud/docker/pull/3836#issuecomment-33723094 + # and https://travis-ci.org/dotcloud/docker/builds/17926783 + commit['stat'] = subprocess.check_output([ 'git', 'log', '--format=format:', '--max-count=1', '--name-status', commit['hash'], '--', From 9261511aa509423432f4e22f6235719deadc0969 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 30 Jan 2014 20:45:32 +0000 Subject: [PATCH 22/33] refactor all *FromJob functions Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- config.go | 26 ++++++++++++----------- container.go | 58 ++++++++++++++++++++++++++++------------------------ server.go | 3 +-- 3 files changed, 46 insertions(+), 41 deletions(-) diff --git a/config.go b/config.go index aad5d50fc0..ac34589640 100644 --- a/config.go +++ b/config.go @@ -23,29 +23,31 @@ type DaemonConfig struct { // ConfigFromJob creates and returns a new DaemonConfig object // by parsing the contents of a job's environment. 
-func ConfigFromJob(job *engine.Job) *DaemonConfig { - var config DaemonConfig - config.Pidfile = job.Getenv("Pidfile") - config.Root = job.Getenv("Root") - config.AutoRestart = job.GetenvBool("AutoRestart") +func DaemonConfigFromJob(job *engine.Job) *DaemonConfig { + config := &DaemonConfig{ + Pidfile: job.Getenv("Pidfile"), + Root: job.Getenv("Root"), + AutoRestart: job.GetenvBool("AutoRestart"), + EnableIptables: job.GetenvBool("EnableIptables"), + EnableIpForward: job.GetenvBool("EnableIpForward"), + BridgeIp: job.Getenv("BridgeIp"), + DefaultIp: net.ParseIP(job.Getenv("DefaultIp")), + InterContainerCommunication: job.GetenvBool("InterContainerCommunication"), + GraphDriver: job.Getenv("GraphDriver"), + } if dns := job.GetenvList("Dns"); dns != nil { config.Dns = dns } - config.EnableIptables = job.GetenvBool("EnableIptables") - config.EnableIpForward = job.GetenvBool("EnableIpForward") if br := job.Getenv("BridgeIface"); br != "" { config.BridgeIface = br } else { config.BridgeIface = DefaultNetworkBridge } - config.BridgeIp = job.Getenv("BridgeIp") - config.DefaultIp = net.ParseIP(job.Getenv("DefaultIp")) - config.InterContainerCommunication = job.GetenvBool("InterContainerCommunication") - config.GraphDriver = job.Getenv("GraphDriver") if mtu := job.GetenvInt("Mtu"); mtu != -1 { config.Mtu = mtu } else { config.Mtu = DefaultNetworkMtu } - return &config + + return config } diff --git a/container.go b/container.go index c98982b111..95e81e2063 100644 --- a/container.go +++ b/container.go @@ -105,23 +105,29 @@ type Config struct { } func ContainerConfigFromJob(job *engine.Job) *Config { - var config Config - config.Hostname = job.Getenv("Hostname") - config.Domainname = job.Getenv("Domainname") - config.User = job.Getenv("User") - config.Memory = job.GetenvInt64("Memory") - config.MemorySwap = job.GetenvInt64("MemorySwap") - config.CpuShares = job.GetenvInt64("CpuShares") - config.AttachStdin = job.GetenvBool("AttachStdin") - config.AttachStdout = 
job.GetenvBool("AttachStdout") - config.AttachStderr = job.GetenvBool("AttachStderr") + config := &Config{ + Hostname: job.Getenv("Hostname"), + Domainname: job.Getenv("Domainname"), + User: job.Getenv("User"), + Memory: job.GetenvInt64("Memory"), + MemorySwap: job.GetenvInt64("MemorySwap"), + CpuShares: job.GetenvInt64("CpuShares"), + AttachStdin: job.GetenvBool("AttachStdin"), + AttachStdout: job.GetenvBool("AttachStdout"), + AttachStderr: job.GetenvBool("AttachStderr"), + Tty: job.GetenvBool("Tty"), + OpenStdin: job.GetenvBool("OpenStdin"), + StdinOnce: job.GetenvBool("StdinOnce"), + Image: job.Getenv("Image"), + VolumesFrom: job.Getenv("VolumesFrom"), + WorkingDir: job.Getenv("WorkingDir"), + NetworkDisabled: job.GetenvBool("NetworkDisabled"), + } + job.GetenvJson("ExposedPorts", &config.ExposedPorts) + job.GetenvJson("Volumes", &config.Volumes) if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil { config.PortSpecs = PortSpecs } - job.GetenvJson("ExposedPorts", &config.ExposedPorts) - config.Tty = job.GetenvBool("Tty") - config.OpenStdin = job.GetenvBool("OpenStdin") - config.StdinOnce = job.GetenvBool("StdinOnce") if Env := job.GetenvList("Env"); Env != nil { config.Env = Env } @@ -131,15 +137,11 @@ func ContainerConfigFromJob(job *engine.Job) *Config { if Dns := job.GetenvList("Dns"); Dns != nil { config.Dns = Dns } - config.Image = job.Getenv("Image") - job.GetenvJson("Volumes", &config.Volumes) - config.VolumesFrom = job.Getenv("VolumesFrom") - config.WorkingDir = job.Getenv("WorkingDir") if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { config.Entrypoint = Entrypoint } - config.NetworkDisabled = job.GetenvBool("NetworkDisabled") - return &config + + return config } type HostConfig struct { @@ -153,19 +155,21 @@ type HostConfig struct { } func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { - var hostConfig HostConfig + hostConfig := &HostConfig{ + ContainerIDFile: job.Getenv("ContainerIDFile"), + Privileged: 
job.GetenvBool("Privileged"), + PublishAllPorts: job.GetenvBool("PublishAllPorts"), + } + job.GetenvJson("LxcConf", &hostConfig.LxcConf) + job.GetenvJson("PortBindings", &hostConfig.PortBindings) if Binds := job.GetenvList("Binds"); Binds != nil { hostConfig.Binds = Binds } - hostConfig.ContainerIDFile = job.Getenv("ContainerIDFile") - job.GetenvJson("LxcConf", &hostConfig.LxcConf) - hostConfig.Privileged = job.GetenvBool("Privileged") - job.GetenvJson("PortBindings", &hostConfig.PortBindings) if Links := job.GetenvList("Links"); Links != nil { hostConfig.Links = Links } - hostConfig.PublishAllPorts = job.GetenvBool("PublishAllPorts") - return &hostConfig + + return hostConfig } type BindMap struct { diff --git a/server.go b/server.go index 5a3b999c43..06e808e117 100644 --- a/server.go +++ b/server.go @@ -42,8 +42,7 @@ func init() { // The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup. func jobInitApi(job *engine.Job) engine.Status { job.Logf("Creating server") - // FIXME: ImportEnv deprecates ConfigFromJob - srv, err := NewServer(job.Eng, ConfigFromJob(job)) + srv, err := NewServer(job.Eng, DaemonConfigFromJob(job)) if err != nil { job.Error(err) return engine.StatusErr From 8e619e13ca3906e849944a4b015b676b93c9f145 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 30 Jan 2014 21:53:32 +0000 Subject: [PATCH 23/33] remove TestAllocateTCPPortLocalhost failure in tests Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.go b/server.go index 06e808e117..27c27f25de 100644 --- a/server.go +++ b/server.go @@ -1742,7 +1742,7 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { return engine.StatusErr } config := ContainerConfigFromJob(job) - if config.Memory != 0 && config.Memory < 524288 { + if config.Memory > 0 && config.Memory < 524288 { job.Errorf("Minimum memory limit allowed is 512k") return engine.StatusErr } From 
720f64af1855235b6f518ad97dbb89eb61191222 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 30 Jan 2014 22:59:21 +0000 Subject: [PATCH 24/33] fix TestExitCode Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- execdriver/lxc/driver.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/execdriver/lxc/driver.go b/execdriver/lxc/driver.go index 4e8f586f82..11ee3b283f 100644 --- a/execdriver/lxc/driver.go +++ b/execdriver/lxc/driver.go @@ -155,7 +155,9 @@ func (d *driver) Run(c *execdriver.Command, startCallback execdriver.StartCallba ) go func() { if err := c.Wait(); err != nil { - waitErr = err + if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0 + waitErr = err + } } close(waitLock) }() From 2655a108e1950534f0fa1d6034ab61444fe35ee5 Mon Sep 17 00:00:00 2001 From: "Roberto G. Hashioka" Date: Thu, 30 Jan 2014 23:21:42 +0000 Subject: [PATCH 25/33] - Fixed the last cli.call's parameter from CmdSearch Docker-DCO-1.1-Signed-off-by: Roberto Hashioka (github: rogaha) --- commands.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands.go b/commands.go index 083ca39bc5..ff7691c916 100644 --- a/commands.go +++ b/commands.go @@ -1674,7 +1674,7 @@ func (cli *DockerCli) CmdSearch(args ...string) error { v := url.Values{} v.Set("term", cmd.Arg(0)) - body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), nil, false)) + body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), nil, true)) if err != nil { return err From 4326e541f843e5c053221f15fef546b42ba29e25 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 30 Jan 2014 23:50:23 +0000 Subject: [PATCH 26/33] add make test-integration Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 275f9dc84c..168707a80f 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: all 
binary build cross default docs docs-build docs-shell shell test +.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD) DOCKER_IMAGE := docker:$(GIT_BRANCH) @@ -25,6 +25,9 @@ docs-shell: docs-build test: build $(DOCKER_RUN_DOCKER) hack/make.sh test test-integration +test-integration: build + $(DOCKER_RUN_DOCKER) hack/make.sh test-integration + shell: build $(DOCKER_RUN_DOCKER) bash From ce423cc9a8b9552ee8bb75f3aac81d291f85375c Mon Sep 17 00:00:00 2001 From: Rafal Jeczalik Date: Sun, 8 Dec 2013 16:45:12 +0100 Subject: [PATCH 27/33] vagrant: added PRIVATE_NETWORK env var Docker-DCO-1.1-Signed-off-by: Rafal Jeczalik (github: rjeczalik) --- Vagrantfile | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index c130587829..f709031fdf 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -8,10 +8,9 @@ AWS_BOX_URI = ENV['BOX_URI'] || "https://github.com/mitchellh/vagrant-aws/raw/ma AWS_REGION = ENV['AWS_REGION'] || "us-east-1" AWS_AMI = ENV['AWS_AMI'] || "ami-69f5a900" AWS_INSTANCE_TYPE = ENV['AWS_INSTANCE_TYPE'] || 't1.micro' - FORWARD_DOCKER_PORTS = ENV['FORWARD_DOCKER_PORTS'] - -SSH_PRIVKEY_PATH = ENV["SSH_PRIVKEY_PATH"] +SSH_PRIVKEY_PATH = ENV['SSH_PRIVKEY_PATH'] +PRIVATE_NETWORK = ENV['PRIVATE_NETWORK'] # A script to upgrade from the 12.04 kernel to the raring backport kernel (3.8) # and install docker. @@ -174,3 +173,14 @@ if !FORWARD_DOCKER_PORTS.nil? end end end + +if !PRIVATE_NETWORK.nil? 
+ Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config| + config.vm.network :hostonly, PRIVATE_NETWORK + end + + Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config| + config.vm.network "private_network", ip: PRIVATE_NETWORK + end +end + From 28b5ae8cc4492f7b3cc2eb2b30b0f41713822b25 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 31 Jan 2014 02:06:08 +0000 Subject: [PATCH 28/33] changed default value of getenvint to 0. fix tests Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api.go | 12 ++++++++++-- config.go | 2 +- engine/env.go | 2 +- engine/env_test.go | 2 +- server.go | 6 +++--- 5 files changed, 16 insertions(+), 8 deletions(-) diff --git a/api.go b/api.go index 0a7f7abea7..79df0f28fc 100644 --- a/api.go +++ b/api.go @@ -584,7 +584,11 @@ func postContainersRestart(srv *Server, version float64, w http.ResponseWriter, return fmt.Errorf("Missing parameter") } job := srv.Eng.Job("restart", vars["name"]) - job.Setenv("t", r.Form.Get("t")) + if r.Form.Get("t") == "" { + job.Setenv("t", "-1") + } else { + job.Setenv("t", r.Form.Get("t")) + } if err := job.Run(); err != nil { return err } @@ -652,7 +656,11 @@ func postContainersStop(srv *Server, version float64, w http.ResponseWriter, r * return fmt.Errorf("Missing parameter") } job := srv.Eng.Job("stop", vars["name"]) - job.Setenv("t", r.Form.Get("t")) + if r.Form.Get("t") == "" { + job.Setenv("t", "-1") + } else { + job.Setenv("t", r.Form.Get("t")) + } if err := job.Run(); err != nil { return err } diff --git a/config.go b/config.go index ac34589640..cb7e985ca2 100644 --- a/config.go +++ b/config.go @@ -43,7 +43,7 @@ func DaemonConfigFromJob(job *engine.Job) *DaemonConfig { } else { config.BridgeIface = DefaultNetworkBridge } - if mtu := job.GetenvInt("Mtu"); mtu != -1 { + if mtu := job.GetenvInt("Mtu"); mtu != 0 { config.Mtu = mtu } else { config.Mtu = DefaultNetworkMtu diff --git a/engine/env.go b/engine/env.go index e000fe26b1..d6ca4ec07b 100644 --- a/engine/env.go +++ 
b/engine/env.go @@ -60,7 +60,7 @@ func (env *Env) GetInt64(key string) int64 { s := strings.Trim(env.Get(key), " \t") val, err := strconv.ParseInt(s, 10, 64) if err != nil { - return -1 + return 0 } return val } diff --git a/engine/env_test.go b/engine/env_test.go index 419c47491e..c7079ff942 100644 --- a/engine/env_test.go +++ b/engine/env_test.go @@ -62,7 +62,7 @@ func TestSetenvInt(t *testing.T) { if val := job.GetenvInt("bar"); val != 42 { t.Fatalf("GetenvInt returns incorrect value: %d", val) } - if val := job.GetenvInt("nonexistent"); val != -1 { + if val := job.GetenvInt("nonexistent"); val != 0 { t.Fatalf("GetenvInt returns incorrect value: %d", val) } } diff --git a/server.go b/server.go index 27c27f25de..6291da6699 100644 --- a/server.go +++ b/server.go @@ -1083,7 +1083,7 @@ func (srv *Server) Containers(job *engine.Job) engine.Status { }, -1) for _, container := range srv.runtime.List() { - if !container.State.IsRunning() && !all && n == -1 && since == "" && before == "" { + if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" { continue } if before != "" && !foundBefore { @@ -1092,7 +1092,7 @@ func (srv *Server) Containers(job *engine.Job) engine.Status { } continue } - if displayed == n { + if n > 0 && displayed == n { break } if container.ID == since || utils.TruncateID(container.ID) == since { @@ -1742,7 +1742,7 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { return engine.StatusErr } config := ContainerConfigFromJob(job) - if config.Memory > 0 && config.Memory < 524288 { + if config.Memory != 0 && config.Memory < 524288 { job.Errorf("Minimum memory limit allowed is 512k") return engine.StatusErr } From 1498cd4e0540c73546a4847948f7d6a75b596178 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 31 Jan 2014 02:21:59 +0000 Subject: [PATCH 29/33] use exists Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api.go | 12 ++---------- engine/job.go | 4 ++++ server.go | 20 
++++++++++++-------- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/api.go b/api.go index 79df0f28fc..0a7f7abea7 100644 --- a/api.go +++ b/api.go @@ -584,11 +584,7 @@ func postContainersRestart(srv *Server, version float64, w http.ResponseWriter, return fmt.Errorf("Missing parameter") } job := srv.Eng.Job("restart", vars["name"]) - if r.Form.Get("t") == "" { - job.Setenv("t", "-1") - } else { - job.Setenv("t", r.Form.Get("t")) - } + job.Setenv("t", r.Form.Get("t")) if err := job.Run(); err != nil { return err } @@ -656,11 +652,7 @@ func postContainersStop(srv *Server, version float64, w http.ResponseWriter, r * return fmt.Errorf("Missing parameter") } job := srv.Eng.Job("stop", vars["name"]) - if r.Form.Get("t") == "" { - job.Setenv("t", "-1") - } else { - job.Setenv("t", r.Form.Get("t")) - } + job.Setenv("t", r.Form.Get("t")) if err := job.Run(); err != nil { return err } diff --git a/engine/job.go b/engine/job.go index 181eaa80c7..c2eadccc1b 100644 --- a/engine/job.go +++ b/engine/job.go @@ -102,6 +102,10 @@ func (job *Job) String() string { return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString()) } +func (job *Job) EnvExists(key string) (value bool) { + return job.env.Exists(key) +} + func (job *Job) Getenv(key string) (value string) { return job.env.Get(key) } diff --git a/server.go b/server.go index 6291da6699..90a8bb3ed8 100644 --- a/server.go +++ b/server.go @@ -1798,10 +1798,12 @@ func (srv *Server) ContainerRestart(job *engine.Job) engine.Status { job.Errorf("Usage: %s CONTAINER\n", job.Name) return engine.StatusErr } - name := job.Args[0] - t := job.GetenvInt("t") - if t == -1 { - t = 10 + var ( + name = job.Args[0] + t = 10 + ) + if job.EnvExists("t") { + t = job.GetenvInt("t") } if container := srv.runtime.Get(name); container != nil { if err := container.Restart(int(t)); err != nil { @@ -2239,10 +2241,12 @@ func (srv *Server) ContainerStop(job *engine.Job) engine.Status { job.Errorf("Usage: %s CONTAINER\n", 
job.Name) return engine.StatusErr } - name := job.Args[0] - t := job.GetenvInt("t") - if t == -1 { - t = 10 + var ( + name = job.Args[0] + t = 10 + ) + if job.EnvExists("t") { + t = job.GetenvInt("t") } if container := srv.runtime.Get(name); container != nil { if err := container.Stop(int(t)); err != nil { From 2b52d6e801dd888e1f5759448da025e0ddcffedd Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 24 Jan 2014 23:15:40 -0800 Subject: [PATCH 30/33] Remove api_params.go Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api.go | 69 ++++++++++++++++-------------- api_params.go | 43 ------------------- commands.go | 95 +++++++++++++++++++++++------------------ integration/api_test.go | 68 +++++++++++++++-------------- 4 files changed, 127 insertions(+), 148 deletions(-) delete mode 100644 api_params.go diff --git a/api.go b/api.go index 0a7f7abea7..ba8646599d 100644 --- a/api.go +++ b/api.go @@ -89,18 +89,10 @@ func httpError(w http.ResponseWriter, err error) { } } -func writeJSON(w http.ResponseWriter, code int, v interface{}) error { - b, err := json.Marshal(v) - - if err != nil { - return err - } - +func writeJSON(w http.ResponseWriter, code int, v engine.Env) error { w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) - w.Write(b) - - return nil + return v.Encode(w) } func getBoolParam(value string) (bool, error) { @@ -352,12 +344,15 @@ func postCommit(srv *Server, version float64, w http.ResponseWriter, r *http.Req if err := parseForm(r); err != nil { return err } - config := &Config{} + var ( + config = &Config{} + env engine.Env + job = srv.Eng.Job("commit", r.Form.Get("container")) + ) if err := json.NewDecoder(r.Body).Decode(config); err != nil && err != io.EOF { utils.Errorf("%s", err) } - job := srv.Eng.Job("commit", r.Form.Get("container")) job.Setenv("repo", r.Form.Get("repo")) job.Setenv("tag", r.Form.Get("tag")) job.Setenv("author", r.Form.Get("author")) @@ -369,8 +364,8 @@ func postCommit(srv *Server, version 
float64, w http.ResponseWriter, r *http.Req if err := job.Run(); err != nil { return err } - - return writeJSON(w, http.StatusCreated, &APIID{id}) + env.Set("Id", id) + return writeJSON(w, http.StatusCreated, env) } // Creates an image from Pull or from Import @@ -555,15 +550,19 @@ func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r if err := parseForm(r); err != nil { return nil } - out := &APIRun{} - job := srv.Eng.Job("create", r.Form.Get("name")) + var ( + out engine.Env + job = srv.Eng.Job("create", r.Form.Get("name")) + outWarnings []string + outId string + warnings = bytes.NewBuffer(nil) + ) if err := job.DecodeEnv(r.Body); err != nil { return err } // Read container ID from the first line of stdout - job.Stdout.AddString(&out.ID) + job.Stdout.AddString(&outId) // Read warnings from stderr - warnings := &bytes.Buffer{} job.Stderr.Add(warnings) if err := job.Run(); err != nil { return err @@ -571,8 +570,10 @@ func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r // Parse warnings from stderr scanner := bufio.NewScanner(warnings) for scanner.Scan() { - out.Warnings = append(out.Warnings, scanner.Text()) + outWarnings = append(outWarnings, scanner.Text()) } + out.Set("Id", outId) + out.SetList("Warnings", outWarnings) return writeJSON(w, http.StatusCreated, out) } @@ -664,18 +665,22 @@ func postContainersWait(srv *Server, version float64, w http.ResponseWriter, r * if vars == nil { return fmt.Errorf("Missing parameter") } - job := srv.Eng.Job("wait", vars["name"]) - var statusStr string - job.Stdout.AddString(&statusStr) + var ( + env engine.Env + status string + job = srv.Eng.Job("wait", vars["name"]) + ) + job.Stdout.AddString(&status) if err := job.Run(); err != nil { return err } // Parse a 16-bit encoded integer to map typical unix exit status. 
- status, err := strconv.ParseInt(statusStr, 10, 16) + _, err := strconv.ParseInt(status, 10, 16) if err != nil { return err } - return writeJSON(w, http.StatusOK, &APIWait{StatusCode: int(status)}) + env.Set("StatusCode", status) + return writeJSON(w, http.StatusOK, env) } func postContainersResize(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error { @@ -874,24 +879,24 @@ func postContainersCopy(srv *Server, version float64, w http.ResponseWriter, r * return fmt.Errorf("Missing parameter") } - copyData := &APICopy{} - contentType := r.Header.Get("Content-Type") - if contentType == "application/json" { - if err := json.NewDecoder(r.Body).Decode(copyData); err != nil { + var copyData engine.Env + + if contentType := r.Header.Get("Content-Type"); contentType == "application/json" { + if err := copyData.Decode(r.Body); err != nil { return err } } else { return fmt.Errorf("Content-Type not supported: %s", contentType) } - if copyData.Resource == "" { + if copyData.Get("Resource") == "" { return fmt.Errorf("Path cannot be empty") } - if copyData.Resource[0] == '/' { - copyData.Resource = copyData.Resource[1:] + if copyData.Get("Resource")[0] == '/' { + copyData.Set("Resource", copyData.Get("Resource")[1:]) } - job := srv.Eng.Job("container_copy", vars["name"], copyData.Resource) + job := srv.Eng.Job("container_copy", vars["name"], copyData.Get("Resource")) job.Stdout.Add(w) if err := job.Run(); err != nil { utils.Errorf("%s", err.Error()) diff --git a/api_params.go b/api_params.go deleted file mode 100644 index fb5ad6f388..0000000000 --- a/api_params.go +++ /dev/null @@ -1,43 +0,0 @@ -package docker - -type ( - APITop struct { - Titles []string - Processes [][]string - } - - APIRmi struct { - Deleted string `json:",omitempty"` - Untagged string `json:",omitempty"` - } - - APIID struct { - ID string `json:"Id"` - } - - APIRun struct { - ID string `json:"Id"` - Warnings []string `json:",omitempty"` - } - - APIPort struct { 
- PrivatePort int64 - PublicPort int64 - Type string - IP string - } - - APIWait struct { - StatusCode int - } - - APIImageConfig struct { - ID string `json:"Id"` - *Config - } - - APICopy struct { - Resource string - HostPath string - } -) diff --git a/commands.go b/commands.go index ff7691c916..1428a769d0 100644 --- a/commands.go +++ b/commands.go @@ -755,18 +755,21 @@ func (cli *DockerCli) CmdTop(args ...string) error { val.Set("ps_args", strings.Join(cmd.Args()[1:], " ")) } - body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false)) + stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false) if err != nil { return err } - procs := APITop{} - err = json.Unmarshal(body, &procs) - if err != nil { + var procs engine.Env + if err := procs.Decode(stream); err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - fmt.Fprintln(w, strings.Join(procs.Titles, "\t")) - for _, proc := range procs.Processes { + fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t")) + processes := [][]string{} + if err := procs.GetJson("Processes", &processes); err != nil { + return err + } + for _, proc := range processes { fmt.Fprintln(w, strings.Join(proc, "\t")) } w.Flush() @@ -1451,25 +1454,25 @@ func (cli *DockerCli) CmdCommit(args ...string) error { v.Set("tag", tag) v.Set("comment", *flComment) v.Set("author", *flAuthor) - var config *Config + var ( + config *Config + env engine.Env + ) if *flConfig != "" { config = &Config{} if err := json.Unmarshal([]byte(*flConfig), config); err != nil { return err } } - body, _, err := readBody(cli.call("POST", "/commit?"+v.Encode(), config, false)) + stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, false) if err != nil { return err } - - apiID := &APIID{} - err = json.Unmarshal(body, apiID) - if err != nil { + if err := env.Decode(stream); err != nil { return err } - fmt.Fprintf(cli.out, "%s\n", apiID.ID) + 
fmt.Fprintf(cli.out, "%s\n", env.Get("ID")) return nil } @@ -1989,7 +1992,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { } //create the container - body, statusCode, err := readBody(cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false)) + stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false) //if image not found try to pull it if statusCode == 404 { _, tag := utils.ParseRepositoryTag(config.Image) @@ -2026,30 +2029,30 @@ func (cli *DockerCli) CmdRun(args ...string) error { if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { return err } - if body, _, err = readBody(cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false)); err != nil { + if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false); err != nil { return err } } else if err != nil { return err } - var runResult APIRun - if err := json.Unmarshal(body, &runResult); err != nil { + var runResult engine.Env + if err := runResult.Decode(stream); err != nil { return err } - for _, warning := range runResult.Warnings { + for _, warning := range runResult.GetList("Warnings") { fmt.Fprintf(cli.err, "WARNING: %s\n", warning) } if len(hostConfig.ContainerIDFile) > 0 { - if _, err = containerIDFile.Write([]byte(runResult.ID)); err != nil { + if _, err = containerIDFile.Write([]byte(runResult.Get("Id"))); err != nil { return fmt.Errorf("failed to write the container ID to the file: %s", err) } } if sigProxy { - sigc := cli.forwardAllSignals(runResult.ID) + sigc := cli.forwardAllSignals(runResult.Get("Id")) defer utils.StopCatch(sigc) } @@ -2063,7 +2066,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { waitDisplayId = make(chan struct{}) go func() { defer close(waitDisplayId) - fmt.Fprintf(cli.out, "%s\n", runResult.ID) + fmt.Fprintf(cli.out, "%s\n", 
runResult.Get("Id")) }() } @@ -2105,7 +2108,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { } errCh = utils.Go(func() error { - return cli.hijack("POST", "/containers/"+runResult.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked) + return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked) }) } else { close(hijacked) @@ -2127,12 +2130,12 @@ func (cli *DockerCli) CmdRun(args ...string) error { } //start the container - if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.ID+"/start", hostConfig, false)); err != nil { + if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil { return err } if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal { - if err := cli.monitorTtySize(runResult.ID); err != nil { + if err := cli.monitorTtySize(runResult.Get("Id")); err != nil { utils.Errorf("Error monitoring TTY size: %s\n", err) } } @@ -2157,26 +2160,26 @@ func (cli *DockerCli) CmdRun(args ...string) error { if autoRemove { // Autoremove: wait for the container to finish, retrieve // the exit code and remove the container - if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.ID+"/wait", nil, false)); err != nil { + if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/wait", nil, false)); err != nil { return err } - if _, status, err = getExitCode(cli, runResult.ID); err != nil { + if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { return err } - if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.ID+"?v=1", nil, false)); err != nil { + if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.Get("Id")+"?v=1", nil, false)); err != nil { return err } } else { if !config.Tty { // In non-tty mode, we can't dettach, so we know we need to wait. 
- if status, err = waitForExit(cli, runResult.ID); err != nil { + if status, err = waitForExit(cli, runResult.Get("Id")); err != nil { return err } } else { // In TTY mode, there is a race. If the process dies too slowly, the state can be update after the getExitCode call // and result in a wrong exit code. // No Autoremove: Simply retrieve the exit code - if _, status, err = getExitCode(cli, runResult.ID); err != nil { + if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { return err } } @@ -2198,15 +2201,15 @@ func (cli *DockerCli) CmdCp(args ...string) error { return nil } - var copyData APICopy + var copyData engine.Env info := strings.Split(cmd.Arg(0), ":") if len(info) != 2 { return fmt.Errorf("Error: Path not specified") } - copyData.Resource = info[1] - copyData.HostPath = cmd.Arg(1) + copyData.Set("Resource", info[1]) + copyData.Set("HostPath", cmd.Arg(1)) stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false) if stream != nil { @@ -2217,7 +2220,7 @@ func (cli *DockerCli) CmdCp(args ...string) error { } if statusCode == 200 { - if err := archive.Untar(stream, copyData.HostPath, nil); err != nil { + if err := archive.Untar(stream, copyData.Get("HostPath"), nil); err != nil { return err } } @@ -2260,13 +2263,21 @@ func (cli *DockerCli) CmdLoad(args ...string) error { } func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) { - var params io.Reader + params := bytes.NewBuffer(nil) if data != nil { - buf, err := json.Marshal(data) - if err != nil { - return nil, -1, err + if env, ok := data.(engine.Env); ok { + if err := env.Encode(params); err != nil { + return nil, -1, err + } + } else { + buf, err := json.Marshal(data) + if err != nil { + return nil, -1, err + } + if _, err := params.Write(buf); err != nil { + return nil, -1, err + } } - params = bytes.NewBuffer(buf) } // fixme: refactor client to support redirect re := regexp.MustCompile("/+") 
@@ -2569,16 +2580,16 @@ func (cli *DockerCli) LoadConfigFile() (err error) { } func waitForExit(cli *DockerCli, containerId string) (int, error) { - body, _, err := readBody(cli.call("POST", "/containers/"+containerId+"/wait", nil, false)) + stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false) if err != nil { return -1, err } - var out APIWait - if err := json.Unmarshal(body, &out); err != nil { + var out engine.Env + if err := out.Decode(stream); err != nil { return -1, err } - return out.StatusCode, nil + return out.GetInt("StatusCode"), nil } // getExitCode perform an inspect on the container. It returns diff --git a/integration/api_test.go b/integration/api_test.go index 95cae47e15..b9ff079cb1 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -485,26 +485,29 @@ func TestGetContainersTop(t *testing.T) { t.Fatal(err) } assertHttpNotError(r, t) - procs := docker.APITop{} - if err := json.Unmarshal(r.Body.Bytes(), &procs); err != nil { + var procs engine.Env + if err := procs.Decode(r.Body); err != nil { t.Fatal(err) } - if len(procs.Titles) != 11 { - t.Fatalf("Expected 11 titles, found %d.", len(procs.Titles)) + if len(procs.GetList("Titles")) != 11 { + t.Fatalf("Expected 11 titles, found %d.", len(procs.GetList("Titles"))) } - if procs.Titles[0] != "USER" || procs.Titles[10] != "COMMAND" { - t.Fatalf("Expected Titles[0] to be USER and Titles[10] to be COMMAND, found %s and %s.", procs.Titles[0], procs.Titles[10]) + if procs.GetList("Titles")[0] != "USER" || procs.GetList("Titles")[10] != "COMMAND" { + t.Fatalf("Expected Titles[0] to be USER and Titles[10] to be COMMAND, found %s and %s.", procs.GetList("Titles")[0], procs.GetList("Titles")[10]) } - - if len(procs.Processes) != 2 { - t.Fatalf("Expected 2 processes, found %d.", len(procs.Processes)) + processes := [][]string{} + if err := procs.GetJson("Processes", &processes); err != nil { + t.Fatal(err) } - if procs.Processes[0][10] != "/bin/sh -c cat" { - 
t.Fatalf("Expected `/bin/sh -c cat`, found %s.", procs.Processes[0][10]) + if len(processes) != 2 { + t.Fatalf("Expected 2 processes, found %d.", len(processes)) } - if procs.Processes[1][10] != "/bin/sh -c cat" { - t.Fatalf("Expected `/bin/sh -c cat`, found %s.", procs.Processes[1][10]) + if processes[0][10] != "/bin/sh -c cat" { + t.Fatalf("Expected `/bin/sh -c cat`, found %s.", processes[0][10]) + } + if processes[1][10] != "/bin/sh -c cat" { + t.Fatalf("Expected `/bin/sh -c cat`, found %s.", processes[1][10]) } } @@ -570,11 +573,11 @@ func TestPostCommit(t *testing.T) { t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) } - apiID := &docker.APIID{} - if err := json.Unmarshal(r.Body.Bytes(), apiID); err != nil { + var env engine.Env + if err := env.Decode(r.Body); err != nil { t.Fatal(err) } - if _, err := srv.ImageInspect(apiID.ID); err != nil { + if _, err := srv.ImageInspect(env.Get("Id")); err != nil { t.Fatalf("The image has not been committed") } } @@ -607,11 +610,11 @@ func TestPostContainersCreate(t *testing.T) { t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) } - apiRun := &docker.APIRun{} - if err := json.Unmarshal(r.Body.Bytes(), apiRun); err != nil { + var apiRun engine.Env + if err := apiRun.Decode(r.Body); err != nil { t.Fatal(err) } - containerID := apiRun.ID + containerID := apiRun.Get("Id") containerAssertExists(eng, containerID, t) containerRun(eng, containerID, t) @@ -863,12 +866,12 @@ func TestPostContainersWait(t *testing.T) { t.Fatal(err) } assertHttpNotError(r, t) - apiWait := &docker.APIWait{} - if err := json.Unmarshal(r.Body.Bytes(), apiWait); err != nil { + var apiWait engine.Env + if err := apiWait.Decode(r.Body); err != nil { t.Fatal(err) } - if apiWait.StatusCode != 0 { - t.Fatalf("Non zero exit code for sleep: %d\n", apiWait.StatusCode) + if apiWait.GetInt("StatusCode") != 0 { + t.Fatalf("Non zero exit code for sleep: %d\n", apiWait.GetInt("StatusCode")) } }) @@ -1160,12 +1163,12 
@@ func TestDeleteImages(t *testing.T) { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } - var outs []docker.APIRmi - if err := json.Unmarshal(r2.Body.Bytes(), &outs); err != nil { + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(r2.Body.Bytes()); err != nil { t.Fatal(err) } - if len(outs) != 1 { - t.Fatalf("Expected %d event (untagged), got %d", 1, len(outs)) + if len(outs.Data) != 1 { + t.Fatalf("Expected %d event (untagged), got %d", 1, len(outs.Data)) } images = getImages(eng, t, false, "") @@ -1190,14 +1193,17 @@ func TestPostContainersCopy(t *testing.T) { containerRun(eng, containerID, t) r := httptest.NewRecorder() - copyData := docker.APICopy{HostPath: ".", Resource: "/test.txt"} - jsonData, err := json.Marshal(copyData) - if err != nil { + var copyData engine.Env + copyData.Set("Resource", "/test.txt") + copyData.Set("HostPath", ".") + + jsonData := bytes.NewBuffer(nil) + if err := copyData.Encode(jsonData); err != nil { t.Fatal(err) } - req, err := http.NewRequest("POST", "/containers/"+containerID+"/copy", bytes.NewReader(jsonData)) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/copy", jsonData) if err != nil { t.Fatal(err) } From 24086fa75dff1ebb56dfb8fbf2c80fdfd5be0e8a Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Tue, 28 Jan 2014 00:27:02 +0000 Subject: [PATCH 31/33] job.error\* now return engine.StatusErr Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- engine/job.go | 10 +- server.go | 446 +++++++++++++++++--------------------------------- 2 files changed, 154 insertions(+), 302 deletions(-) diff --git a/engine/job.go b/engine/job.go index c2eadccc1b..782bb02171 100644 --- a/engine/job.go +++ b/engine/job.go @@ -188,10 +188,12 @@ func (job *Job) Printf(format string, args ...interface{}) (n int, err error) { return fmt.Fprintf(job.Stdout, format, args...) 
} -func (job *Job) Errorf(format string, args ...interface{}) (n int, err error) { - return fmt.Fprintf(job.Stderr, format, args...) +func (job *Job) Errorf(format string, args ...interface{}) Status { + fmt.Fprintf(job.Stderr, format, args...) + return StatusErr } -func (job *Job) Error(err error) (int, error) { - return fmt.Fprintf(job.Stderr, "%s", err) +func (job *Job) Error(err error) Status { + fmt.Fprintf(job.Stderr, "%s", err) + return StatusErr } diff --git a/server.go b/server.go index 90a8bb3ed8..6eeca79d90 100644 --- a/server.go +++ b/server.go @@ -44,8 +44,7 @@ func jobInitApi(job *engine.Job) engine.Status { job.Logf("Creating server") srv, err := NewServer(job.Eng, DaemonConfigFromJob(job)) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if srv.runtime.config.Pidfile != "" { job.Logf("Creating pidfile") @@ -106,8 +105,7 @@ func jobInitApi(job *engine.Job) engine.Status { "auth": srv.Auth, } { if err := job.Eng.Register(name, handler); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } } return engine.StatusOK @@ -130,8 +128,7 @@ func (srv *Server) ListenAndServe(job *engine.Job) engine.Status { for i := 0; i < len(protoAddrs); i += 1 { err := <-chErrors if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } } @@ -199,8 +196,7 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status { } if n := len(job.Args); n < 1 || n > 2 { - job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name) } name := job.Args[0] var sig uint64 @@ -211,8 +207,7 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status { // The largest legal signal is 31, so let's parse on 5 bits sig, err = strconv.ParseUint(job.Args[1], 10, 5) if err != nil { - job.Errorf("Invalid signal: %s", job.Args[1]) - return engine.StatusErr + return job.Errorf("Invalid signal: %s", job.Args[1]) } } } @@ 
-220,21 +215,18 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status { // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { if err := container.Kill(); err != nil { - job.Errorf("Cannot kill container %s: %s", name, err) - return engine.StatusErr + return job.Errorf("Cannot kill container %s: %s", name, err) } srv.LogEvent("kill", container.ID, srv.runtime.repositories.ImageName(container.Image)) } else { // Otherwise, just send the requested signal if err := container.kill(int(sig)); err != nil { - job.Errorf("Cannot kill container %s: %s", name, err) - return engine.StatusErr + return job.Errorf("Cannot kill container %s: %s", name, err) } // FIXME: Add event for signals } } else { - job.Errorf("No such container: %s", name) - return engine.StatusErr + return job.Errorf("No such container: %s", name) } return engine.StatusOK } @@ -244,8 +236,7 @@ func (srv *Server) Auth(job *engine.Job) engine.Status { job.GetenvJson("authConfig", authConfig) status, err := auth.Login(authConfig, srv.HTTPRequestFactory(nil)) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } job.Printf("%s\n", status) return engine.StatusOK @@ -253,8 +244,7 @@ func (srv *Server) Auth(job *engine.Job) engine.Status { func (srv *Server) Events(job *engine.Job) engine.Status { if len(job.Args) != 1 { - job.Errorf("Usage: %s FROM", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s FROM", job.Name) } var ( @@ -304,8 +294,7 @@ func (srv *Server) Events(job *engine.Job) engine.Status { continue } if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } } return engine.StatusOK @@ -313,28 +302,24 @@ func (srv *Server) Events(job *engine.Job) engine.Status { func (srv *Server) ContainerExport(job *engine.Job) engine.Status { if len(job.Args) != 1 { - job.Errorf("Usage: %s container_id", job.Name) - return engine.StatusErr + return 
job.Errorf("Usage: %s container_id", job.Name) } name := job.Args[0] if container := srv.runtime.Get(name); container != nil { data, err := container.Export() if err != nil { - job.Errorf("%s: %s", name, err) - return engine.StatusErr + return job.Errorf("%s: %s", name, err) } // Stream the entire contents of the container (basically a volatile snapshot) if _, err := io.Copy(job.Stdout, data); err != nil { - job.Errorf("%s: %s", name, err) - return engine.StatusErr + return job.Errorf("%s: %s", name, err) } // FIXME: factor job-specific LogEvent to engine.Job.Run() srv.LogEvent("export", container.ID, srv.runtime.repositories.ImageName(container.Image)) return engine.StatusOK } - job.Errorf("No such container: %s", name) - return engine.StatusErr + return job.Errorf("No such container: %s", name) } // ImageExport exports all images with the given tag. All versions @@ -344,15 +329,13 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status { // out is the writer where the images are written to. 
func (srv *Server) ImageExport(job *engine.Job) engine.Status { if len(job.Args) != 1 { - job.Errorf("Usage: %s CONTAINER\n", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s CONTAINER\n", job.Name) } name := job.Args[0] // get image json tempdir, err := ioutil.TempDir("", "docker-export-") if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } defer os.RemoveAll(tempdir) @@ -360,20 +343,17 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status { rootRepo, err := srv.runtime.repositories.Get(name) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if rootRepo != nil { for _, id := range rootRepo { image, err := srv.ImageInspect(id) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if err := srv.exportImage(image, tempdir); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } } @@ -383,30 +363,25 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status { rootRepoJson, _ := json.Marshal(rootRepoMap) if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.ModeAppend); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } } else { image, err := srv.ImageInspect(name) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if err := srv.exportImage(image, tempdir); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } } fs, err := archive.Tar(tempdir, archive.Uncompressed) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if _, err := io.Copy(job.Stdout, fs); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } @@ -468,8 +443,7 @@ func (srv *Server) exportImage(image *Image, tempdir string) error { func (srv *Server) Build(job *engine.Job) engine.Status { if len(job.Args) != 0 { - job.Errorf("Usage: %s\n", 
job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s\n", job.Name) } var ( remoteURL = job.Getenv("remote") @@ -494,38 +468,32 @@ func (srv *Server) Build(job *engine.Job) engine.Status { } root, err := ioutil.TempDir("", "docker-build-git") if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } defer os.RemoveAll(root) if output, err := exec.Command("git", "clone", remoteURL, root).CombinedOutput(); err != nil { - job.Errorf("Error trying to use git: %s (%s)", err, output) - return engine.StatusErr + return job.Errorf("Error trying to use git: %s (%s)", err, output) } c, err := archive.Tar(root, archive.Uncompressed) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } context = c } else if utils.IsURL(remoteURL) { f, err := utils.Download(remoteURL) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } defer f.Body.Close() dockerFile, err := ioutil.ReadAll(f.Body) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } c, err := MkBuildContext(string(dockerFile), nil) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } context = c } @@ -543,8 +511,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status { !suppressOutput, !noCache, rm, job.Stdout, sf, authConfig, configFile) id, err := b.Build(context) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if repoName != "" { srv.runtime.repositories.Set(repoName, tag, id, false) @@ -557,8 +524,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status { func (srv *Server) ImageLoad(job *engine.Job) engine.Status { tmpImageDir, err := ioutil.TempDir("", "docker-import-") if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } defer os.RemoveAll(tmpImageDir) @@ -569,40 +535,33 @@ func (srv *Server) ImageLoad(job *engine.Job) engine.Status { tarFile, err := os.Create(repoTarFile) if 
err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if _, err := io.Copy(tarFile, job.Stdin); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } tarFile.Close() repoFile, err := os.Open(repoTarFile) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if err := os.Mkdir(repoDir, os.ModeDir); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if err := archive.Untar(repoFile, repoDir, nil); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } dirs, err := ioutil.ReadDir(repoDir) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } for _, d := range dirs { if d.IsDir() { if err := srv.recursiveLoad(d.Name(), tmpImageDir); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } } } @@ -611,21 +570,18 @@ func (srv *Server) ImageLoad(job *engine.Job) engine.Status { if err == nil { repositories := map[string]Repository{} if err := json.Unmarshal(repositoriesJson, &repositories); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } for imageName, tagMap := range repositories { for tag, address := range tagMap { if err := srv.runtime.repositories.Set(imageName, tag, address, true); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } } } } else if !os.IsNotExist(err) { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK @@ -669,8 +625,7 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error { func (srv *Server) ImagesSearch(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { - job.Errorf("Usage: %s TERM", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s TERM", job.Name) } var ( term = job.Args[0] @@ -682,13 +637,11 @@ func (srv *Server) ImagesSearch(job *engine.Job) engine.Status { r, err := 
registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), auth.IndexServerAddress()) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } results, err := r.SearchRepositories(term) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } outs := engine.NewTable("star_count", 0) for _, result := range results.Results { @@ -698,16 +651,14 @@ func (srv *Server) ImagesSearch(job *engine.Job) engine.Status { } outs.ReverseSort() if _, err := outs.WriteListTo(job.Stdout); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } func (srv *Server) ImageInsert(job *engine.Job) engine.Status { if len(job.Args) != 3 { - job.Errorf("Usage: %s IMAGE URL PATH\n", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s IMAGE URL PATH\n", job.Name) } var ( @@ -721,32 +672,27 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status { out := utils.NewWriteFlusher(job.Stdout) img, err := srv.runtime.repositories.LookupImage(name) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } file, err := utils.Download(url) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } defer file.Body.Close() config, _, _, err := ParseRun([]string{img.ID, "echo", "insert", url, path}, srv.runtime.sysInfo) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } c, _, err := srv.runtime.Create(config, "") if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf, false, utils.TruncateID(img.ID), "Downloading"), path); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } // FIXME: Handle custom repo, tag comment, author img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil) @@ -772,8 +718,7 @@ func (srv *Server) 
ImagesViz(job *engine.Job) engine.Status { for _, image := range images { parentImage, err = image.GetParent() if err != nil { - job.Errorf("Error while getting parent image: %v", err) - return engine.StatusErr + return job.Errorf("Error while getting parent image: %v", err) } if parentImage != nil { job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n")) @@ -808,8 +753,7 @@ func (srv *Server) Images(job *engine.Job) engine.Status { allImages, err = srv.runtime.graph.Heads() } if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } lookup := make(map[string]*engine.Env) for name, repository := range srv.runtime.repositories.Repositories { @@ -863,8 +807,7 @@ func (srv *Server) Images(job *engine.Job) engine.Status { outs.ReverseSort() if _, err := outs.WriteListTo(job.Stdout); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } @@ -907,22 +850,19 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { v.Set("InitSha1", utils.INITSHA1) v.Set("InitPath", initPath) if _, err := v.WriteTo(job.Stdout); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } func (srv *Server) ImageHistory(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { - job.Errorf("Usage: %s IMAGE", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s IMAGE", job.Name) } name := job.Args[0] image, err := srv.runtime.repositories.LookupImage(name) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } lookupMap := make(map[string][]string) @@ -949,16 +889,14 @@ func (srv *Server) ImageHistory(job *engine.Job) engine.Status { }) outs.ReverseSort() if _, err := outs.WriteListTo(job.Stdout); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } func (srv *Server) ContainerTop(job *engine.Job) engine.Status { if len(job.Args) 
!= 1 && len(job.Args) != 2 { - job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name) - return engine.StatusErr + return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name) } var ( name = job.Args[0] @@ -971,18 +909,15 @@ func (srv *Server) ContainerTop(job *engine.Job) engine.Status { if container := srv.runtime.Get(name); container != nil { if !container.State.IsRunning() { - job.Errorf("Container %s is not running", name) - return engine.StatusErr + return job.Errorf("Container %s is not running", name) } pids, err := srv.runtime.execDriver.GetPidsForContainer(container.ID) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } output, err := exec.Command("ps", psArgs).Output() if err != nil { - job.Errorf("Error running ps: %s", err) - return engine.StatusErr + return job.Errorf("Error running ps: %s", err) } lines := strings.Split(string(output), "\n") @@ -997,8 +932,7 @@ func (srv *Server) ContainerTop(job *engine.Job) engine.Status { } } if pidIndex == -1 { - job.Errorf("Couldn't find PID field in ps output") - return engine.StatusErr + return job.Errorf("Couldn't find PID field in ps output") } processes := [][]string{} @@ -1009,8 +943,7 @@ func (srv *Server) ContainerTop(job *engine.Job) engine.Status { fields := strings.Fields(line) p, err := strconv.Atoi(fields[pidIndex]) if err != nil { - job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) - return engine.StatusErr + return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) } for _, pid := range pids { @@ -1028,38 +961,32 @@ func (srv *Server) ContainerTop(job *engine.Job) engine.Status { return engine.StatusOK } - job.Errorf("No such container: %s", name) - return engine.StatusErr + return job.Errorf("No such container: %s", name) } func (srv *Server) ContainerChanges(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { - job.Errorf("Usage: %s CONTAINER", job.Name) - return engine.StatusErr + 
return job.Errorf("Usage: %s CONTAINER", job.Name) } name := job.Args[0] if container := srv.runtime.Get(name); container != nil { outs := engine.NewTable("", 0) changes, err := container.Changes() if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } for _, change := range changes { out := &engine.Env{} if err := out.Import(change); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } outs.Add(out) } if _, err := outs.WriteListTo(job.Stdout); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } } else { - job.Errorf("No such container: %s", name) - return engine.StatusErr + return job.Errorf("No such container: %s", name) } return engine.StatusOK } @@ -1108,8 +1035,7 @@ func (srv *Server) Containers(job *engine.Job) engine.Status { out.Set("Status", container.State.String()) str, err := container.NetworkSettings.PortMappingAPI().ToListString() if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } out.Set("Ports", str) if size { @@ -1121,34 +1047,29 @@ func (srv *Server) Containers(job *engine.Job) engine.Status { } outs.ReverseSort() if _, err := outs.WriteListTo(job.Stdout); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } func (srv *Server) ContainerCommit(job *engine.Job) engine.Status { if len(job.Args) != 1 { - job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name) - return engine.StatusErr + return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER\n", job.Name) } name := job.Args[0] container := srv.runtime.Get(name) if container == nil { - job.Errorf("No such container: %s", name) - return engine.StatusErr + return job.Errorf("No such container: %s", name) } var config Config if err := job.GetenvJson("config", &config); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &config) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } job.Printf("%s\n", img.ID) return engine.StatusOK @@ -1156,16 +1077,14 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status { func (srv *Server) ImageTag(job *engine.Job) engine.Status { if len(job.Args) != 2 && len(job.Args) != 3 { - job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name) } var tag string if len(job.Args) == 3 { tag = job.Args[2] } if err := srv.runtime.repositories.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } @@ -1402,8 +1321,7 @@ func (srv *Server) poolRemove(kind, key string) error { func (srv *Server) ImagePull(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 && n != 2 { - job.Errorf("Usage: %s IMAGE [TAG]", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s IMAGE [TAG]", job.Name) } var ( localName = job.Args[0] @@ -1427,22 +1345,19 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status { <-c return engine.StatusOK } - job.Error(err) - return engine.StatusErr + return job.Error(err) } defer srv.poolRemove("pull", localName+":"+tag) // Resolve the Repository name from fqn to endpoint + name endpoint, remoteName, err := registry.ResolveRepositoryName(localName) if err != nil { - 
job.Error(err) - return engine.StatusErr + return job.Error(err) } r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if endpoint == auth.IndexServerAddress() { @@ -1451,8 +1366,7 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status { } if err = srv.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK @@ -1621,8 +1535,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, // FIXME: Allow to interrupt current push when new push of same image is done. func (srv *Server) ImagePush(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { - job.Errorf("Usage: %s IMAGE", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s IMAGE", job.Name) } var ( localName = job.Args[0] @@ -1634,23 +1547,20 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status { job.GetenvJson("authConfig", authConfig) job.GetenvJson("metaHeaders", metaHeaders) if _, err := srv.poolAdd("push", localName); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } defer srv.poolRemove("push", localName) // Resolve the Repository name from fqn to endpoint + name endpoint, remoteName, err := registry.ResolveRepositoryName(localName) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } img, err := srv.runtime.graph.Get(localName) r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) if err2 != nil { - job.Error(err2) - return engine.StatusErr + return job.Error(err2) } if err != nil { @@ -1659,28 +1569,24 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status { // If it fails, try to get the repository if localRepo, exists := srv.runtime.repositories.Repositories[localName]; 
exists { if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, sf); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } - job.Error(err) - return engine.StatusErr + return job.Error(err) } var token []string job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName)) if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } func (srv *Server) ImageImport(job *engine.Job) engine.Status { if n := len(job.Args); n != 2 && n != 3 { - job.Errorf("Usage: %s SRC REPO [TAG]", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name) } var ( src = job.Args[0] @@ -1699,8 +1605,7 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status { } else { u, err := url.Parse(src) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if u.Scheme == "" { u.Scheme = "http" @@ -1712,21 +1617,18 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status { // If curl is not available, fallback to http.Get() resp, err = utils.Download(u.String()) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } archive = utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing") } img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } // Optionally register the image at REPO/TAG if repo != "" { if err := srv.runtime.repositories.Set(repo, tag, img.ID, true); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } } job.Stdout.Write(sf.FormatStatus("", img.ID)) @@ -1738,13 +1640,11 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { if len(job.Args) == 1 { name = 
job.Args[0] } else if len(job.Args) > 1 { - job.Printf("Usage: %s", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s", job.Name) } config := ContainerConfigFromJob(job) if config.Memory != 0 && config.Memory < 524288 { - job.Errorf("Minimum memory limit allowed is 512k") - return engine.StatusErr + return job.Errorf("Minimum memory limit allowed is 512k") } if config.Memory > 0 && !srv.runtime.sysInfo.MemoryLimit { job.Errorf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n") @@ -1771,11 +1671,9 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { if tag == "" { tag = DEFAULTTAG } - job.Errorf("No such image: %s (tag: %s)", config.Image, tag) - return engine.StatusErr + return job.Errorf("No such image: %s (tag: %s)", config.Image, tag) } - job.Error(err) - return engine.StatusErr + return job.Error(err) } if !container.Config.NetworkDisabled && srv.runtime.sysInfo.IPv4ForwardingDisabled { job.Errorf("WARNING: IPv4 forwarding is disabled.\n") @@ -1788,15 +1686,14 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { job.Printf("%s\n", container.ID) } for _, warning := range buildWarnings { - job.Errorf("%s\n", warning) + return job.Errorf("%s\n", warning) } return engine.StatusOK } func (srv *Server) ContainerRestart(job *engine.Job) engine.Status { if len(job.Args) != 1 { - job.Errorf("Usage: %s CONTAINER\n", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s CONTAINER\n", job.Name) } var ( name = job.Args[0] @@ -1807,22 +1704,18 @@ func (srv *Server) ContainerRestart(job *engine.Job) engine.Status { } if container := srv.runtime.Get(name); container != nil { if err := container.Restart(int(t)); err != nil { - job.Errorf("Cannot restart container %s: %s\n", name, err) - return engine.StatusErr + return job.Errorf("Cannot restart container %s: %s\n", name, err) } srv.LogEvent("restart", container.ID, srv.runtime.repositories.ImageName(container.Image)) } 
else { - job.Errorf("No such container: %s\n", name) - return engine.StatusErr + return job.Errorf("No such container: %s\n", name) } return engine.StatusOK - } func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { if len(job.Args) != 1 { - job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name) - return engine.StatusErr + return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name) } name := job.Args[0] removeVolume := job.GetenvBool("removeVolume") @@ -1832,23 +1725,19 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { if removeLink { if container == nil { - job.Errorf("No such link: %s", name) - return engine.StatusErr + return job.Errorf("No such link: %s", name) } name, err := getFullName(name) if err != nil { job.Error(err) - return engine.StatusErr } parent, n := path.Split(name) if parent == "/" { - job.Errorf("Conflict, cannot remove the default name of the container") - return engine.StatusErr + return job.Errorf("Conflict, cannot remove the default name of the container") } pe := srv.runtime.containerGraph.Get(parent) if pe == nil { - job.Errorf("Cannot get parent %s for name %s", parent, name) - return engine.StatusErr + return job.Errorf("Cannot get parent %s for name %s", parent, name) } parentContainer := srv.runtime.Get(pe.ID()) @@ -1861,16 +1750,14 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { } if err := srv.runtime.containerGraph.Delete(name); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } if container != nil { if container.State.IsRunning() { - job.Errorf("Impossible to remove a running container, please stop it first") - return engine.StatusErr + return job.Errorf("Impossible to remove a running container, please stop it first") } volumes := make(map[string]struct{}) @@ -1895,8 +1782,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { volumes[volumeId] = struct{}{} } if err := 
srv.runtime.Destroy(container); err != nil { - job.Errorf("Cannot destroy container %s: %s", name, err) - return engine.StatusErr + return job.Errorf("Cannot destroy container %s: %s", name, err) } srv.LogEvent("destroy", container.ID, srv.runtime.repositories.ImageName(container.Image)) @@ -1916,14 +1802,12 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { continue } if err := srv.runtime.volumes.Delete(volumeId); err != nil { - job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err) - return engine.StatusErr + return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err) } } } } else { - job.Errorf("No such container: %s", name) - return engine.StatusErr + return job.Errorf("No such container: %s", name) } return engine.StatusOK } @@ -2075,22 +1959,18 @@ func (srv *Server) DeleteImage(name string, autoPrune bool) (*engine.Table, erro func (srv *Server) ImageDelete(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { - job.Errorf("Usage: %s IMAGE", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s IMAGE", job.Name) } imgs, err := srv.DeleteImage(job.Args[0], job.GetenvBool("autoPrune")) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if len(imgs.Data) == 0 { - job.Errorf("Conflict, %s wasn't deleted", job.Args[0]) - return engine.StatusErr + return job.Errorf("Conflict, %s wasn't deleted", job.Args[0]) } if _, err := imgs.WriteListTo(job.Stdout); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } @@ -2180,16 +2060,14 @@ func (srv *Server) RegisterLinks(container *Container, hostConfig *HostConfig) e func (srv *Server) ContainerStart(job *engine.Job) engine.Status { if len(job.Args) < 1 { - job.Errorf("Usage: %s container_id", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s container_id", job.Name) } name := job.Args[0] runtime := srv.runtime container := runtime.Get(name) if 
container == nil { - job.Errorf("No such container: %s", name) - return engine.StatusErr + return job.Errorf("No such container: %s", name) } // If no environment was set, then no hostconfig was passed. if len(job.Environ()) > 0 { @@ -2205,8 +2083,7 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status { // refuse to bind mount "/" to the container if source == "/" { - job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind) - return engine.StatusErr + return job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind) } // ensure the source exists on the host @@ -2214,22 +2091,19 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status { if err != nil && os.IsNotExist(err) { err = os.MkdirAll(source, 0755) if err != nil { - job.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error()) - return engine.StatusErr + return job.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error()) } } } // Register any links from the host config before starting the container if err := srv.RegisterLinks(container, hostConfig); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } container.hostConfig = hostConfig container.ToDisk() } if err := container.Start(); err != nil { - job.Errorf("Cannot start container %s: %s", name, err) - return engine.StatusErr + return job.Errorf("Cannot start container %s: %s", name, err) } srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image)) @@ -2238,8 +2112,7 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status { func (srv *Server) ContainerStop(job *engine.Job) engine.Status { if len(job.Args) != 1 { - job.Errorf("Usage: %s CONTAINER\n", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s CONTAINER\n", job.Name) } var ( name = job.Args[0] @@ -2250,21 +2123,18 @@ func (srv *Server) ContainerStop(job *engine.Job) engine.Status { } if container := 
srv.runtime.Get(name); container != nil { if err := container.Stop(int(t)); err != nil { - job.Errorf("Cannot stop container %s: %s\n", name, err) - return engine.StatusErr + return job.Errorf("Cannot stop container %s: %s\n", name, err) } srv.LogEvent("stop", container.ID, srv.runtime.repositories.ImageName(container.Image)) } else { - job.Errorf("No such container: %s\n", name) - return engine.StatusErr + return job.Errorf("No such container: %s\n", name) } return engine.StatusOK } func (srv *Server) ContainerWait(job *engine.Job) engine.Status { if len(job.Args) != 1 { - job.Errorf("Usage: %s", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s", job.Name) } name := job.Args[0] if container := srv.runtime.Get(name); container != nil { @@ -2272,41 +2142,34 @@ func (srv *Server) ContainerWait(job *engine.Job) engine.Status { job.Printf("%d\n", status) return engine.StatusOK } - job.Errorf("%s: no such container: %s", job.Name, name) - return engine.StatusErr + return job.Errorf("%s: no such container: %s", job.Name, name) } func (srv *Server) ContainerResize(job *engine.Job) engine.Status { if len(job.Args) != 3 { - job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name) - return engine.StatusErr + return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name) } name := job.Args[0] height, err := strconv.Atoi(job.Args[1]) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } width, err := strconv.Atoi(job.Args[2]) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if container := srv.runtime.Get(name); container != nil { if err := container.Resize(height, width); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } - job.Errorf("No such container: %s", name) - return engine.StatusErr + return job.Errorf("No such container: %s", name) } func (srv *Server) ContainerAttach(job *engine.Job) engine.Status { if len(job.Args) != 1 { - job.Errorf("Usage: %s CONTAINER\n", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s CONTAINER\n", job.Name) } var ( @@ -2320,8 +2183,7 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status { container := srv.runtime.Get(name) if container == nil { - job.Errorf("No such container: %s", name) - return engine.StatusErr + return job.Errorf("No such container: %s", name) } //logs @@ -2372,8 +2234,7 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status { //stream if stream { if container.State.IsGhost() { - job.Errorf("Impossible to attach to a ghost container") - return engine.StatusErr + return job.Errorf("Impossible to attach to a ghost container") } var ( @@ -2427,8 +2288,7 @@ func (srv *Server) ImageInspect(name string) (*Image, error) { func (srv *Server) JobInspect(job *engine.Job) engine.Status { // TODO: deprecate KIND/conflict if n := len(job.Args); n != 2 { - job.Errorf("Usage: %s CONTAINER|IMAGE KIND", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s CONTAINER|IMAGE KIND", job.Name) } var ( name = job.Args[0] @@ -2440,35 +2300,30 @@ func (srv *Server) JobInspect(job *engine.Job) engine.Status { ) if conflict && image != nil && container != nil { - 
job.Errorf("Conflict between containers and images") - return engine.StatusErr + return job.Errorf("Conflict between containers and images") } switch kind { case "image": if errImage != nil { - job.Error(errImage) - return engine.StatusErr + return job.Error(errImage) } object = image case "container": if errContainer != nil { - job.Error(errContainer) - return engine.StatusErr + return job.Error(errContainer) } object = &struct { *Container HostConfig *HostConfig }{container, container.hostConfig} default: - job.Errorf("Unknown kind: %s", kind) - return engine.StatusErr + return job.Errorf("Unknown kind: %s", kind) } b, err := json.Marshal(object) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } job.Stdout.Write(b) return engine.StatusOK @@ -2476,8 +2331,7 @@ func (srv *Server) JobInspect(job *engine.Job) engine.Status { func (srv *Server) ContainerCopy(job *engine.Job) engine.Status { if len(job.Args) != 2 { - job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name) - return engine.StatusErr + return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name) } var ( @@ -2489,19 +2343,15 @@ func (srv *Server) ContainerCopy(job *engine.Job) engine.Status { data, err := container.Copy(resource) if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if _, err := io.Copy(job.Stdout, data); err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } return engine.StatusOK } - job.Errorf("No such container: %s", name) - return engine.StatusErr - + return job.Errorf("No such container: %s", name) } func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) { From 8fbdb7b59eba078bf24546686e005cc86a60e493 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Tue, 28 Jan 2014 03:26:24 +0000 Subject: [PATCH 32/33] add setSubEnv and getSubEnv Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api.go | 18 +++++++----------- engine/env.go | 22 ++++++++++++++++++++++ 
engine/job.go | 8 ++++++++ server.go | 3 +-- 4 files changed, 38 insertions(+), 13 deletions(-) diff --git a/api.go b/api.go index ba8646599d..4d294667ac 100644 --- a/api.go +++ b/api.go @@ -345,11 +345,11 @@ func postCommit(srv *Server, version float64, w http.ResponseWriter, r *http.Req return err } var ( - config = &Config{} + config engine.Env env engine.Env job = srv.Eng.Job("commit", r.Form.Get("container")) ) - if err := json.NewDecoder(r.Body).Decode(config); err != nil && err != io.EOF { + if err := config.Import(r.Body); err != nil { utils.Errorf("%s", err) } @@ -357,7 +357,7 @@ func postCommit(srv *Server, version float64, w http.ResponseWriter, r *http.Req job.Setenv("tag", r.Form.Get("tag")) job.Setenv("author", r.Form.Get("author")) job.Setenv("comment", r.Form.Get("comment")) - job.SetenvJson("config", config) + job.SetenvSubEnv("config", &config) var id string job.Stdout.AddString(&id) @@ -704,18 +704,14 @@ func postContainersAttach(srv *Server, version float64, w http.ResponseWriter, r return fmt.Errorf("Missing parameter") } - // TODO: replace the buffer by job.AddEnv() var ( job = srv.Eng.Job("inspect", vars["name"], "container") - buffer = bytes.NewBuffer(nil) - c Container + c, err = job.Stdout.AddEnv() ) - job.Stdout.Add(buffer) - if err := job.Run(); err != nil { + if err != nil { return err } - - if err := json.Unmarshal(buffer.Bytes(), &c); err != nil { + if err = job.Run(); err != nil { return err } @@ -742,7 +738,7 @@ func postContainersAttach(srv *Server, version float64, w http.ResponseWriter, r fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") - if !c.Config.Tty && version >= 1.6 { + if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version >= 1.6 { errStream = utils.NewStdWriter(outStream, utils.Stderr) outStream = utils.NewStdWriter(outStream, utils.Stdout) } else { diff --git a/engine/env.go b/engine/env.go index d6ca4ec07b..ce8c34bb24 100644 --- 
a/engine/env.go +++ b/engine/env.go @@ -86,6 +86,28 @@ func (env *Env) GetList(key string) []string { return l } +func (env *Env) GetSubEnv(key string) *Env { + sval := env.Get(key) + if sval == "" { + return nil + } + buf := bytes.NewBufferString(sval) + var sub Env + if err := sub.Decode(buf); err != nil { + return nil + } + return &sub +} + +func (env *Env) SetSubEnv(key string, sub *Env) error { + var buf bytes.Buffer + if err := sub.Encode(&buf); err != nil { + return err + } + env.Set(key, string(buf.Bytes())) + return nil +} + func (env *Env) GetJson(key string, iface interface{}) error { sval := env.Get(key) if sval == "" { diff --git a/engine/job.go b/engine/job.go index 782bb02171..1f35ac85ff 100644 --- a/engine/job.go +++ b/engine/job.go @@ -118,6 +118,14 @@ func (job *Job) SetenvBool(key string, value bool) { job.env.SetBool(key, value) } +func (job *Job) GetenvSubEnv(key string) *Env { + return job.env.GetSubEnv(key) +} + +func (job *Job) SetenvSubEnv(key string, value *Env) error { + return job.env.SetSubEnv(key, value) +} + func (job *Job) GetenvInt64(key string) int64 { return job.env.GetInt64(key) } diff --git a/server.go b/server.go index 6eeca79d90..d8195f3de0 100644 --- a/server.go +++ b/server.go @@ -1656,8 +1656,7 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { } resolvConf, err := utils.GetResolvConf() if err != nil { - job.Error(err) - return engine.StatusErr + return job.Error(err) } if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { job.Errorf("WARNING: Docker detected local DNS server on resolv.conf. 
Using default external servers: %v\n", defaultDns) From d86c1b064dad349737b4a423fe8d0c42db4ca45e Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 30 Jan 2014 23:05:16 -0800 Subject: [PATCH 33/33] Do not allocate networks first ip Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- networkdriver/ipallocator/allocator.go | 7 ++++++- networkdriver/ipallocator/allocator_test.go | 21 +++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/networkdriver/ipallocator/allocator.go b/networkdriver/ipallocator/allocator.go index 33401d5caf..1c5a7b4cc2 100644 --- a/networkdriver/ipallocator/allocator.go +++ b/networkdriver/ipallocator/allocator.go @@ -99,12 +99,17 @@ func getNextIp(address *net.IPNet) (*net.IP, error) { return ip, nil } + var ( + firstNetIP = address.IP.To4().Mask(address.Mask) + firstAsInt = ipToInt(&firstNetIP) + 1 + ) + pos = int32(allocated.PullBack()) for i := int32(0); i < max; i++ { pos = pos%max + 1 next := int32(base + pos) - if next == ownIP { + if next == ownIP || next == firstAsInt { continue } diff --git a/networkdriver/ipallocator/allocator_test.go b/networkdriver/ipallocator/allocator_test.go index 871f143521..5e9fcfc983 100644 --- a/networkdriver/ipallocator/allocator_test.go +++ b/networkdriver/ipallocator/allocator_test.go @@ -213,6 +213,27 @@ func TestIPAllocator(t *testing.T) { } } +func TestAllocateFirstIP(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 0}, + Mask: []byte{255, 255, 255, 0}, + } + + firstIP := network.IP.To4().Mask(network.Mask) + first := ipToInt(&firstIP) + 1 + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + allocated := ipToInt(ip) + + if allocated == first { + t.Fatalf("allocated ip should not equal first ip: %d == %d", first, allocated) + } +} + func assertIPEquals(t *testing.T, ip1, ip2 *net.IP) { if !ip1.Equal(*ip2) { t.Fatalf("Expected IP %s, got %s", ip1, ip2)