diff --git a/AUTHORS b/AUTHORS index 9a0d8ecc5d..60533c1cc2 100644 --- a/AUTHORS +++ b/AUTHORS @@ -44,6 +44,7 @@ Daniel Nordberg Daniel Robinson Daniel Von Fange Daniel YC Lin +Darren Coxall David Calavera David Sissitka Deni Bertovic @@ -120,6 +121,7 @@ Marko Mikulicic Markus Fix Martin Redmond Matt Apperson +Mathieu Le Marec - Pasquet Matt Bachmann Matthew Mueller Maxim Treskin diff --git a/Dockerfile b/Dockerfile index df08761a61..46717a2810 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,7 +36,7 @@ run apt-get install -y -q mercurial run apt-get install -y -q build-essential libsqlite3-dev # Install Go -run curl -s https://go.googlecode.com/files/go1.2rc4.src.tar.gz | tar -v -C /usr/local -xz +run curl -s https://go.googlecode.com/files/go1.2rc5.src.tar.gz | tar -v -C /usr/local -xz env PATH /usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin env GOPATH /go:/go/src/github.com/dotcloud/docker/vendor run cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std diff --git a/Vagrantfile b/Vagrantfile index a0bb38ca4f..6bbea51d46 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -70,7 +70,7 @@ SCRIPT # trigger dkms to build the virtualbox guest module install. $vbox_script = <`). + + :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success + :query q: suppress verbose build output + :query nocache: do not use the cache when building the image + :reqheader Content-type: should be set to ``"application/tar"``. + :statuscode 200: no error + :statuscode 500: server error - :query t: repository name (and optionally a tag) to be applied to the resulting image in case of success - :query q: suppress verbose build output - :query nocache: do not use the cache when building the image - :statuscode 200: no error - :statuscode 500: server error Check auth configuration diff --git a/docs/sources/commandline/cli.rst b/docs/sources/commandline/cli.rst index 37f371baad..d0a8d83c0c 100644 --- a/docs/sources/commandline/cli.rst +++ b/docs/sources/commandline/cli.rst @@ -231,9 +231,33 @@ Full -run example :: - Usage: docker diff CONTAINER [OPTIONS] + Usage: docker diff CONTAINER + + List the changed files and directories in a container's filesystem - Inspect changes on a container's filesystem +There are 3 events that are listed in the 'diff': + +1. ```A``` - Add +2. ```D``` - Delete +3. ```C``` - Change + +for example: + +.. code-block:: bash + + $ sudo docker diff 7bb0e258aefe + + C /dev + A /dev/kmsg + C /etc + A /etc/mtab + A /go + A /go/src + A /go/src/github.com + A /go/src/github.com/dotcloud + A /go/src/github.com/dotcloud/docker + A /go/src/github.com/dotcloud/docker/.git + .... .. _cli_events: @@ -323,6 +347,40 @@ Show events in the past from a specified time -notrunc=false: Don't truncate output -q=false: only show numeric IDs +To see how the docker:latest image was built: + +.. code-block:: bash + + $ docker history docker + ID CREATED CREATED BY + docker:latest 19 hours ago /bin/sh -c #(nop) ADD . 
in /go/src/github.com/dotcloud/docker + cf5f2467662d 2 weeks ago /bin/sh -c #(nop) ENTRYPOINT ["hack/dind"] + 3538fbe372bf 2 weeks ago /bin/sh -c #(nop) WORKDIR /go/src/github.com/dotcloud/docker + 7450f65072e5 2 weeks ago /bin/sh -c #(nop) VOLUME /var/lib/docker + b79d62b97328 2 weeks ago /bin/sh -c apt-get install -y -q lxc + 36714852a550 2 weeks ago /bin/sh -c apt-get install -y -q iptables + 8c4c706df1d6 2 weeks ago /bin/sh -c /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEYn' > /.s3cfg + b89989433c48 2 weeks ago /bin/sh -c pip install python-magic + a23e640d85b5 2 weeks ago /bin/sh -c pip install s3cmd + 41f54fec7e79 2 weeks ago /bin/sh -c apt-get install -y -q python-pip + d9bc04add907 2 weeks ago /bin/sh -c apt-get install -y -q reprepro dpkg-sig + e74f4760fa70 2 weeks ago /bin/sh -c gem install --no-rdoc --no-ri fpm + 1e43224726eb 2 weeks ago /bin/sh -c apt-get install -y -q ruby1.9.3 rubygems libffi-dev + 460953ae9d7f 2 weeks ago /bin/sh -c #(nop) ENV GOPATH=/go:/go/src/github.com/dotcloud/docker/vendor + 8b63eb1d666b 2 weeks ago /bin/sh -c #(nop) ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/goroot/bin + 3087f3bcedf2 2 weeks ago /bin/sh -c #(nop) ENV GOROOT=/goroot + 635840d198e5 2 weeks ago /bin/sh -c cd /goroot/src && ./make.bash + 439f4a0592ba 2 weeks ago /bin/sh -c curl -s https://go.googlecode.com/files/go1.1.2.src.tar.gz | tar -v -C / -xz && mv /go /goroot + 13967ed36e93 2 weeks ago /bin/sh -c #(nop) ENV CGO_ENABLED=0 + bf7424458437 2 weeks ago /bin/sh -c apt-get install -y -q build-essential + a89ec997c3bf 2 weeks ago /bin/sh -c apt-get install -y -q mercurial + b9f165c6e749 2 weeks ago /bin/sh -c apt-get install -y -q git + 17a64374afa7 2 weeks ago /bin/sh -c apt-get install -y -q curl + d5e85dc5b1d8 2 weeks ago /bin/sh -c apt-get update + 13e642467c11 2 weeks ago /bin/sh -c echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list + ae6dde92a94e 2 weeks ago /bin/sh -c #(nop) MAINTAINER Solomon Hykes + ubuntu:12.04 6 months ago + .. _cli_images: ``images`` @@ -435,6 +493,21 @@ might not get preserved. Display system-wide information. +.. code-block:: bash + + $ sudo docker info + Containers: 292 + Images: 194 + Debug mode (server): false + Debug mode (client): false + Fds: 22 + Goroutines: 67 + LXC Version: 0.9.0 + EventsListeners: 115 + Kernel Version: 3.8.0-33-generic + WARNING: No swap limit support + + .. _cli_insert: ``insert`` @@ -772,6 +845,13 @@ id may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in read-only or read-write mode, respectively. By default, the volumes are mounted in the same mode (rw or ro) as the reference container. +Known Issues (run -volumes-from) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* :issue:`2702`: "lxc-start: Permission denied - failed to mount" + could indicate a permissions problem with AppArmor. Please see the + issue for a workaround. + .. _cli_search: ``search`` diff --git a/docs/sources/conf.py b/docs/sources/conf.py index 0ccd4a4ed5..a143e821be 100644 --- a/docs/sources/conf.py +++ b/docs/sources/conf.py @@ -235,7 +235,7 @@ latex_documents = [ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ - ('toctree', 'docker', u'Docker Documentation', + ('commandline/cli', 'docker', u'Docker Documentation', [u'Team Docker'], 1) ] diff --git a/docs/sources/examples/couchdb_data_volumes.rst b/docs/sources/examples/couchdb_data_volumes.rst index 60674fe9a0..1f6b4b7910 100644 --- a/docs/sources/examples/couchdb_data_volumes.rst +++ b/docs/sources/examples/couchdb_data_volumes.rst @@ -20,7 +20,7 @@ Note that we're marking ``/var/lib/couchdb`` as a data volume. .. code-block:: bash - COUCH1=$(sudo docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03) + COUCH1=$(sudo docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03) Add data to the first database ------------------------------ @@ -31,7 +31,7 @@ replace ``localhost`` with the public IP of your Docker host. .. code-block:: bash HOST=localhost - URL="http://$HOST:$(sudo docker port $COUCH1 5984)/_utils/" + URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -Po '\d+$')/_utils/" echo "Navigate to $URL in your browser, and use the couch interface to add data" Create second database @@ -41,7 +41,7 @@ This time, we're requesting shared access to ``$COUCH1``'s volumes. .. code-block:: bash - COUCH2=$(sudo docker run -d -volumes-from $COUCH1 shykes/couchdb:2013-05-03) + COUCH2=$(sudo docker run -d -p 5984 -volumes-from $COUCH1 shykes/couchdb:2013-05-03) Browse data on the second database ---------------------------------- @@ -49,7 +49,7 @@ Browse data on the second database .. code-block:: bash HOST=localhost - URL="http://$HOST:$(sudo docker port $COUCH2 5984)/_utils/" + URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -Po '\d+$')/_utils/" echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!' Congratulations, you are now running two Couchdb containers, completely diff --git a/docs/sources/examples/example_header.inc b/docs/sources/examples/example_header.inc index 607421fc13..0621b39794 100644 --- a/docs/sources/examples/example_header.inc +++ b/docs/sources/examples/example_header.inc @@ -1,4 +1,7 @@ .. note:: - This example assumes you have Docker running in daemon mode. For more information please see :ref:`running_examples` + * This example assumes you have Docker running in daemon mode. For + more information please see :ref:`running_examples`. + * **If you don't like sudo** then see :ref:`dockergroup` + diff --git a/docs/sources/examples/hello_world.rst b/docs/sources/examples/hello_world.rst index 6d6c1b28a6..99eaa2c483 100644 --- a/docs/sources/examples/hello_world.rst +++ b/docs/sources/examples/hello_world.rst @@ -127,10 +127,12 @@ Check the logs make sure it is working correctly. sudo docker attach $CONTAINER_ID -Attach to the container to see the results in realtime. +Attach to the container to see the results in real-time. - **"docker attach**" This will allow us to attach to a background process to see what is going on. +- **"-sig-proxy=true"** Proxify all received signal to the process + (even in non-tty mode) - **$CONTAINER_ID** The Id of the container we want to attach too. Exit from the container attachment by pressing Control-C. diff --git a/docs/sources/examples/python_web_app.rst b/docs/sources/examples/python_web_app.rst index e3f5f5eb83..3034bf980a 100644 --- a/docs/sources/examples/python_web_app.rst +++ b/docs/sources/examples/python_web_app.rst @@ -39,11 +39,12 @@ container. The ``BUILD_JOB`` environment variable will be set with the new conta .. 
code-block:: bash - sudo docker attach $BUILD_JOB + sudo docker attach -sig-proxy=false $BUILD_JOB [...] While this container is running, we can attach to the new container to -see what is going on. You can use Ctrl-C to disconnect. +see what is going on. The flag ``-sig-proxy`` set as ``false`` allows you to connect and +disconnect (Ctrl-C) to it without stopping the container. .. code-block:: bash diff --git a/docs/sources/examples/running_redis_service.rst b/docs/sources/examples/running_redis_service.rst index edcf73e657..886f473ef2 100644 --- a/docs/sources/examples/running_redis_service.rst +++ b/docs/sources/examples/running_redis_service.rst @@ -29,7 +29,7 @@ with your own user name. .. code-block:: bash - sudo docker build -t /redis + sudo docker build -t /redis . Run the service --------------- @@ -82,7 +82,7 @@ of our ``redis`` container. DB_PORT_6379_TCP_ADDR=172.17.0.33 DB_PORT_6379_TCP_PROTO=tcp -We can see that we've got a small list of environmental varaibles prefixed with ``DB``. +We can see that we've got a small list of environment variables prefixed with ``DB``. The ``DB`` comes from the link alias specified when we launched the container. Let's use the ``DB_PORT_6379_TCP_ADDR`` variable to connect to our Redis container. diff --git a/docs/sources/examples/running_ssh_service.rst b/docs/sources/examples/running_ssh_service.rst index 59a80fbf6e..3d0a782678 100644 --- a/docs/sources/examples/running_ssh_service.rst +++ b/docs/sources/examples/running_ssh_service.rst @@ -12,9 +12,9 @@ SSH Daemon Service **Video:** -I've create a little screencast to show how to create a SSHd service +I've created a little screencast to show how to create an SSHd service and connect to it. It is something like 11 minutes and not entirely -smooth, but gives you a good idea. +smooth, but it gives you a good idea. .. note:: This screencast was created before Docker version 0.5.2, so the diff --git a/docs/sources/installation/index.rst b/docs/sources/installation/index.rst index 1a73cb7ae6..9004214181 100644 --- a/docs/sources/installation/index.rst +++ b/docs/sources/installation/index.rst @@ -19,11 +19,12 @@ Contents: ubuntulinux binaries + security + upgrading + kernel vagrant windows amazon rackspace archlinux gentoolinux - upgrading - kernel diff --git a/docs/sources/installation/kernel.rst b/docs/sources/installation/kernel.rst index bc8440fe2e..b9abdc2722 100644 --- a/docs/sources/installation/kernel.rst +++ b/docs/sources/installation/kernel.rst @@ -25,6 +25,7 @@ If you cannot or do not want to use the "official" kernels, here is some technical background about the features (both optional and mandatory) that docker needs to run successfully. + Linux version 3.8 or above -------------------------- @@ -39,6 +40,15 @@ The symptoms include: - kernel crash causing the machine to freeze for a few minutes, or even completely. +Additionally, kernels prior 3.4 did not implement ``reboot_pid_ns``, +which means that the ``reboot()`` syscall could reboot the host machine, +instead of terminating the container. To work around that problem, +LXC userland tools (since version 0.8) automatically drop the ``SYS_BOOT`` +capability when necessary. Still, if you run a pre-3.4 kernel with pre-0.8 +LXC tools, be aware that containers can reboot the whole host! This is +not something that Docker wants to address in the short term, since you +shouldn't use kernels prior 3.8 with Docker anyway. + While it is still possible to use older kernels for development, it is really not advised to do so. 
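+A quick way to see where your host stands is to compare the running
+kernel against the 3.8 recommendation above. The following is only a
+rough sketch (it assumes a conventional ``major.minor`` release string
+such as ``3.8.0-33-generic``; adapt it to your distribution if needed):
+
+.. code-block:: bash
+
+   # Warn if the running kernel is older than 3.8.
+   kernel="$(uname -r)"
+   major="$(echo "$kernel" | cut -d. -f1)"
+   minor="$(echo "$kernel" | cut -d. -f2)"
+   if [ "$major" -lt 3 ] || { [ "$major" -eq 3 ] && [ "$minor" -lt 8 ]; }; then
+       echo "WARNING: kernel $kernel is older than 3.8"
+   else
+       echo "Kernel $kernel meets the 3.8 recommendation"
+   fi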
diff --git a/docs/sources/installation/security.rst b/docs/sources/installation/security.rst new file mode 100644 index 0000000000..3dc5780e85 --- /dev/null +++ b/docs/sources/installation/security.rst @@ -0,0 +1,267 @@ +:title: Docker Security +:description: Review of the Docker Daemon attack surface +:keywords: Docker, Docker documentation, security + +.. _dockersecurity: + +Docker Security +=============== + + *Adapted from* `Containers & Docker: How Secure are They? `_ + +There are three major areas to consider when reviewing Docker security: + +* the intrinsic security of containers, as implemented by kernel + namespaces and cgroups; +* the attack surface of the Docker daemon itself; +* the "hardening" security features of the kernel and how they + interact with containers. + +Kernel Namespaces +----------------- + +Docker containers are essentially LXC containers, and they come with +the same security features. When you start a container with ``docker +run``, behind the scenes Docker uses ``lxc-start`` to execute the +Docker container. This creates a set of namespaces and control groups +for the container. Those namespaces and control groups are not created +by Docker itself, but by ``lxc-start``. This means that as the LXC +userland tools evolve (and provide additional namespaces and isolation +features), Docker will automatically make use of them. + +**Namespaces provide the first and most straightforward form of +isolation**: processes running within a container cannot see, and even +less affect, processes running in another container, or in the host +system. + +**Each container also gets its own network stack**, meaning that a +container doesn’t get a privileged access to the sockets or interfaces +of another container. Of course, if the host system is setup +accordingly, containers can interact with each other through their +respective network interfaces — just like they can interact with +external hosts. When you specify public ports for your containers or +use :ref:`links ` then IP traffic is allowed +between containers. They can ping each other, send/receive UDP +packets, and establish TCP connections, but that can be restricted if +necessary. From a network architecture point of view, all containers +on a given Docker host are sitting on bridge interfaces. This means +that they are just like physical machines connected through a common +Ethernet switch; no more, no less. + +How mature is the code providing kernel namespaces and private +networking? Kernel namespaces were introduced `between kernel version +2.6.15 and 2.6.26 +`_. This +means that since July 2008 (date of the 2.6.26 release, now 5 years +ago), namespace code has been exercised and scrutinized on a large +number of production systems. And there is more: the design and +inspiration for the namespaces code are even older. Namespaces are +actually an effort to reimplement the features of `OpenVZ +`_ in such a way that they could +be merged within the mainstream kernel. And OpenVZ was initially +released in 2005, so both the design and the implementation are +pretty mature. + +Control Groups +-------------- + +Control Groups are the other key component of Linux Containers. They +implement resource accounting and limiting. They provide a lot of very +useful metrics, but they also help to ensure that each container gets +its fair share of memory, CPU, disk I/O; and, more importantly, that a +single container cannot bring the system down by exhausting one of +those resources. 
+ +So while they do not play a role in preventing one container from +accessing or affecting the data and processes of another container, +they are essential to fend off some denial-of-service attacks. They +are particularly important on multi-tenant platforms, like public and +private PaaS, to guarantee a consistent uptime (and performance) even +when some applications start to misbehave. + +Control Groups have been around for a while as well: the code was +started in 2006, and initially merged in kernel 2.6.24. + +Docker Daemon Attack Surface +---------------------------- + +Running containers (and applications) with Docker implies running the +Docker daemon. This daemon currently requires root privileges, and you +should therefore be aware of some important details. + +First of all, **only trusted users should be allowed to control your +Docker daemon**. This is a direct consequence of some powerful Docker +features. Specifically, Docker allows you to share a directory between +the Docker host and a guest container; and it allows you to do so +without limiting the access rights of the container. This means that +you can start a container where the ``/host`` directory will be the +``/`` directory on your host; and the container will be able to alter +your host filesystem without any restriction. This sounds crazy? Well, +you have to know that **all virtualization systems allowing filesystem +resource sharing behave the same way**. Nothing prevents you from +sharing your root filesystem (or even your root block device) with a +virtual machine. + +This has a strong security implication: if you instrument Docker from +e.g. a web server to provision containers through an API, you should +be even more careful than usual with parameter checking, to make sure +that a malicious user cannot pass crafted parameters causing Docker to +create arbitrary containers. + +For this reason, the REST API endpoint (used by the Docker CLI to +communicate with the Docker daemon) changed in Docker 0.5.2, and now +uses a UNIX socket instead of a TCP socket bound on 127.0.0.1 (the +latter being prone to cross-site-scripting attacks if you happen to +run Docker directly on your local machine, outside of a VM). You can +then use traditional UNIX permission checks to limit access to the +control socket. + +You can also expose the REST API over HTTP if you explicitly decide +so. However, if you do that, being aware of the abovementioned +security implication, you should ensure that it will be reachable +only from a trusted network or VPN; or protected with e.g. ``stunnel`` +and client SSL certificates. + +Recent improvements in Linux namespaces will soon allow to run +full-featured containers without root privileges, thanks to the new +user namespace. This is covered in detail `here +`_. Moreover, +this will solve the problem caused by sharing filesystems between host +and guest, since the user namespace allows users within containers +(including the root user) to be mapped to other users in the host +system. + +The end goal for Docker is therefore to implement two additional +security improvements: + +* map the root user of a container to a non-root user of the Docker + host, to mitigate the effects of a container-to-host privilege + escalation; +* allow the Docker daemon to run without root privileges, and delegate + operations requiring those privileges to well-audited sub-processes, + each with its own (very limited) scope: virtual network setup, + filesystem management, etc. 
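+For example, the "traditional UNIX permission checks" mentioned above
+boil down to controlling who can read and write the control socket.
+This is a minimal sketch that assumes the default socket path and a
+*docker* group as described in the installation documentation; adjust
+it if your setup differs:
+
+.. code-block:: bash
+
+   # See who can currently reach the Docker control socket.
+   ls -l /var/run/docker.sock
+
+   # Restrict it to root and members of the "docker" group.
+   sudo chown root:docker /var/run/docker.sock
+   sudo chmod 660 /var/run/docker.sock
+
+Note that the daemon re-creates the socket when it restarts, so
+long-lived setups should rely on the *docker* group mechanism rather
+than a one-off ``chmod``.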
+ +Finally, if you run Docker on a server, it is recommended to run +exclusively Docker in the server, and move all other services within +containers controlled by Docker. Of course, it is fine to keep your +favorite admin tools (probably at least an SSH server), as well as +existing monitoring/supervision processes (e.g. NRPE, collectd, etc). + +Linux Kernel Capabilities +------------------------- + +By default, Docker starts containers with a very restricted set of +capabilities. What does that mean? + +Capabilities turn the binary "root/non-root" dichotomy into a +fine-grained access control system. Processes (like web servers) that +just need to bind on a port below 1024 do not have to run as root: +they can just be granted the ``net_bind_service`` capability +instead. And there are many other capabilities, for almost all the +specific areas where root privileges are usually needed. + +This means a lot for container security; let’s see why! + +Your average server (bare metal or virtual machine) needs to run a +bunch of processes as root. Those typically include SSH, cron, +syslogd; hardware management tools (to e.g. load modules), network +configuration tools (to handle e.g. DHCP, WPA, or VPNs), and much +more. A container is very different, because almost all of those tasks +are handled by the infrastructure around the container: + +* SSH access will typically be managed by a single server running in + the Docker host; +* ``cron``, when necessary, should run as a user process, dedicated + and tailored for the app that needs its scheduling service, rather + than as a platform-wide facility; +* log management will also typically be handed to Docker, or by + third-party services like Loggly or Splunk; +* hardware management is irrelevant, meaning that you never need to + run ``udevd`` or equivalent daemons within containers; +* network management happens outside of the containers, enforcing + separation of concerns as much as possible, meaning that a container + should never need to perform ``ifconfig``, ``route``, or ip commands + (except when a container is specifically engineered to behave like a + router or firewall, of course). + +This means that in most cases, containers will not need "real" root +privileges *at all*. And therefore, containers can run with a reduced +capability set; meaning that "root" within a container has much less +privileges than the real "root". For instance, it is possible to: + +* deny all "mount" operations; +* deny access to raw sockets (to prevent packet spoofing); +* deny access to some filesystem operations, like creating new device + nodes, changing the owner of files, or altering attributes + (including the immutable flag); +* deny module loading; +* and many others. + +This means that even if an intruder manages to escalate to root within +a container, it will be much harder to do serious damage, or to +escalate to the host. + +This won't affect regular web apps; but malicious users will find that +the arsenal at their disposal has shrunk considerably! You can see +`the list of dropped capabilities in the Docker code +`_, +and a full list of available capabilities in `Linux manpages +`_. + +Of course, you can always enable extra capabilities if you really need +them (for instance, if you want to use a FUSE-based filesystem), but +by default, Docker containers will be locked down to ensure maximum +safety. 
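+If you want to see this reduced capability set for yourself, one rough
+way is to read the capability bounding set of a process running inside
+a container and decode it with ``capsh`` (shipped with ``libcap`` on
+most distributions). This is only an illustrative sketch; the exact set
+you observe will depend on your Docker and LXC versions:
+
+.. code-block:: bash
+
+   # Print the capability bounding set (a hexadecimal bitmask)
+   # of a process running inside a container.
+   sudo docker run ubuntu grep CapBnd /proc/self/status
+
+   # On the host, translate the bitmask into capability names,
+   # substituting the value printed above:
+   # capsh --decode=<CapBnd value>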
+ +Other Kernel Security Features +------------------------------ + +Capabilities are just one of the many security features provided by +modern Linux kernels. It is also possible to leverage existing, +well-known systems like TOMOYO, AppArmor, SELinux, GRSEC, etc. with +Docker. + +While Docker currently only enables capabilities, it doesn't interfere +with the other systems. This means that there are many different ways +to harden a Docker host. Here are a few examples. + +* You can run a kernel with GRSEC and PAX. This will add many safety + checks, both at compile-time and run-time; it will also defeat many + exploits, thanks to techniques like address randomization. It + doesn’t require Docker-specific configuration, since those security + features apply system-wide, independently of containers. +* If your distribution comes with security model templates for LXC + containers, you can use them out of the box. For instance, Ubuntu + comes with AppArmor templates for LXC, and those templates provide + an extra safety net (even though it overlaps greatly with + capabilities). +* You can define your own policies using your favorite access control + mechanism. Since Docker containers are standard LXC containers, + there is nothing “magic” or specific to Docker. + +Just like there are many third-party tools to augment Docker +containers with e.g. special network topologies or shared filesystems, +you can expect to see tools to harden existing Docker containers +without affecting Docker’s core. + +Conclusions +----------- + +Docker containers are, by default, quite secure; especially if you +take care of running your processes inside the containers as +non-privileged users (i.e. non root). + +You can add an extra layer of safety by enabling Apparmor, SELinux, +GRSEC, or your favorite hardening solution. + +Last but not least, if you see interesting security features in other +containerization systems, you will be able to implement them as well +with Docker, since everything is provided by the kernel anyway. + +For more context and especially for comparisons with VMs and other +container systems, please also see the `original blog post +`_. + +.. _blogsecurity: http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/ + diff --git a/docs/sources/use/basics.rst b/docs/sources/use/basics.rst index d1ad081f99..0b3fafced1 100644 --- a/docs/sources/use/basics.rst +++ b/docs/sources/use/basics.rst @@ -34,13 +34,11 @@ This will find the ``ubuntu`` image by name in the :ref:`Central Index ` and download it from the top-level Central Repository to a local image cache. -.. NOTE:: When the image has successfully downloaded, you will see a 12 -character hash ``539c0211cd76: Download complete`` which is the short -form of the image ID. These short image IDs are the first 12 characters -of the full image ID - which can be found using ``docker inspect`` or -``docker images -notrunc=true`` - -.. _dockergroup: +.. NOTE:: When the image has successfully downloaded, you will see a + 12 character hash ``539c0211cd76: Download complete`` which is the + short form of the image ID. These short image IDs are the first 12 + characters of the full image ID - which can be found using ``docker + inspect`` or ``docker images -notrunc=true`` Running an interactive shell ---------------------------- @@ -53,33 +51,38 @@ Running an interactive shell # use the escape sequence Ctrl-p + Ctrl-q sudo docker run -i -t ubuntu /bin/bash +.. _dockergroup: -Why ``sudo``? 
-------------- +sudo and the docker Group +------------------------- The ``docker`` daemon always runs as root, and since ``docker`` version 0.5.2, ``docker`` binds to a Unix socket instead of a TCP port. By default that Unix socket is owned by the user *root*, and so, by default, you can access it with ``sudo``. -Starting in version 0.5.3, if you create a Unix group called *docker* -and add users to it, then the ``docker`` daemon will make the -ownership of the Unix socket read/writable by the *docker* group when -the daemon starts. The ``docker`` daemon must always run as root, but -if you run the ``docker`` client as a user in the *docker* group then -you don't need to add ``sudo`` to all the client commands. +Starting in version 0.5.3, if you (or your Docker installer) create a +Unix group called *docker* and add users to it, then the ``docker`` +daemon will make the ownership of the Unix socket read/writable by the +*docker* group when the daemon starts. The ``docker`` daemon must +always run as root, but if you run the ``docker`` client as a user in +the *docker* group then you don't need to add ``sudo`` to all the +client commands. + +**Example:** .. code-block:: bash - # Add the docker group + # Add the docker group if it doesn't already exist. sudo groupadd docker - # Add the ubuntu user to the docker group + # Add the user "ubuntu" to the docker group. + # Change the user name to match your preferred user. # You may have to logout and log back in again for - # this to take effect + # this to take effect. sudo gpasswd -a ubuntu docker - # Restart the docker daemon + # Restart the docker daemon. sudo service docker restart .. _bind_docker: @@ -87,7 +90,7 @@ you don't need to add ``sudo`` to all the client commands. Bind Docker to another host/port or a Unix socket ------------------------------------------------- -.. DANGER:: Changing the default ``docker`` daemon binding to a TCP +.. warning:: Changing the default ``docker`` daemon binding to a TCP port or Unix *docker* user group will increase your security risks by allowing non-root users to potentially gain *root* access on the host (`e.g. #1369 diff --git a/docs/sources/use/builder.rst b/docs/sources/use/builder.rst index 7c9d3397f2..b8dd95bad0 100644 --- a/docs/sources/use/builder.rst +++ b/docs/sources/use/builder.rst @@ -318,8 +318,9 @@ this optional but default, you could use a CMD: ``VOLUME ["/data"]`` -The ``VOLUME`` instruction will add one or more new volumes to any -container created from the image. +The ``VOLUME`` instruction will create a mount point with the specified name and mark it +as holding externally mounted volumes from native host or other containers. For more information/examples +and mounting instructions via docker client, refer to :ref:`volume_def` documentation. 3.10 USER --------- diff --git a/docs/sources/use/host_integration.rst b/docs/sources/use/host_integration.rst index 92012df3d6..a9f9c1e753 100644 --- a/docs/sources/use/host_integration.rst +++ b/docs/sources/use/host_integration.rst @@ -29,14 +29,32 @@ Here are a few sample scripts for systemd and upstart to integrate with docker. Sample Upstart Script --------------------- +In this example we've already created a container to run Redis with an id of +0a7e070b698b. To create an upstart script for our container, we create a file +named ``/etc/init/redis.conf`` and place the following into it: + .. 
code-block:: bash description "Redis container" author "Me" - start on filesystem and started lxc-net and started docker + start on filesystem and started docker stop on runlevel [!2345] respawn - exec docker start -a 0a7e070b698b + script + # Wait for docker to finish starting up first. + FILE=/var/run/docker.sock + while [ ! -e $FILE ] ; do + inotifywait -t 2 -e create $(dirname $FILE) + done + /usr/bin/docker start -a 0a7e070b698b + end script + +Next, we have to configure docker so that it's run with the option ``-r=false``. +Run the following command: + +.. code-block:: bash + + $ sudo sh -c "echo 'DOCKER_OPTS=\"-r=false\"' > /etc/default/docker" Sample systemd Script diff --git a/docs/sources/use/working_with_volumes.rst b/docs/sources/use/working_with_volumes.rst index 25febad755..9156e574d0 100644 --- a/docs/sources/use/working_with_volumes.rst +++ b/docs/sources/use/working_with_volumes.rst @@ -30,44 +30,60 @@ Each container can have zero or more data volumes. Getting Started ............... - - -Using data volumes is as simple as adding a new flag: ``-v``. The parameter ``-v`` can be used more than once in order to create more volumes within the new container. The example below shows the instruction to create a container with two new volumes:: +Using data volumes is as simple as adding a new flag: ``-v``. The +parameter ``-v`` can be used more than once in order to create more +volumes within the new container. The example below shows the +instruction to create a container with two new volumes:: docker run -v /var/volume1 -v /var/volume2 shykes/couchdb -For a Dockerfile, the VOLUME instruction will add one or more new volumes to any container created from the image:: +For a Dockerfile, the VOLUME instruction will add one or more new +volumes to any container created from the image:: VOLUME ["/var/volume1", "/var/volume2"] -Create a new container using existing volumes from an existing container: ---------------------------------------------------------------------------- +Mount Volumes from an Existing Container: +----------------------------------------- - -The command below creates a new container which is runnning as daemon ``-d`` and with one volume ``/var/lib/couchdb``:: +The command below creates a new container which is runnning as daemon +``-d`` and with one volume ``/var/lib/couchdb``:: COUCH1=$(sudo docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03) -From the container id of that previous container ``$COUCH1`` it's possible to create new container sharing the same volume using the parameter ``-volumes-from container_id``:: +From the container id of that previous container ``$COUCH1`` it's +possible to create new container sharing the same volume using the +parameter ``-volumes-from container_id``:: COUCH2=$(sudo docker run -d -volumes-from $COUCH1 shykes/couchdb:2013-05-03) Now, the second container has the all the information from the first volume. -Create a new container which mounts a host directory into it: -------------------------------------------------------------- +Mount a Host Directory as a Container Volume: +--------------------------------------------- + +:: -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "host-dir" is missing, then docker creates a new volume. - This is not available for a Dockerfile due the portability and sharing purpose of it. The [host-dir] volumes is something 100% host dependent and will break on any other machine. 
+This is not available for a Dockerfile due the portability and sharing +purpose of it. The [host-dir] volumes is something 100% host dependent +and will break on any other machine. For example:: sudo docker run -v /var/logs:/var/host_logs:ro shykes/couchdb:2013-05-03 -The command above mounts the host directory ``/var/logs`` into the container with read only permissions as ``/var/host_logs``. +The command above mounts the host directory ``/var/logs`` into the +container with read only permissions as ``/var/host_logs``. .. versionadded:: v0.5.0 + +Known Issues +............ + +* :issue:`2702`: "lxc-start: Permission denied - failed to mount" + could indicate a permissions problem with AppArmor. Please see the + issue for a workaround. diff --git a/docs/sources/use/workingwithrepository.rst b/docs/sources/use/workingwithrepository.rst index 66b2928c96..5faebcc2e5 100644 --- a/docs/sources/use/workingwithrepository.rst +++ b/docs/sources/use/workingwithrepository.rst @@ -177,6 +177,15 @@ you can push and pull it like any other repository, but it will there will be no user name checking performed. Your registry will function completely independently from the Central Index. +.. raw:: html + + + +.. seealso:: `Docker Blog: How to use your own registry + `_ + Authentication file ------------------- diff --git a/engine/job.go b/engine/job.go index c4a2c3ef52..3ccaa8d1a0 100644 --- a/engine/job.go +++ b/engine/job.go @@ -214,7 +214,7 @@ func (job *Job) GetenvList(key string) []string { return l } -func (job *Job) SetenvList(key string, value []string) error { +func (job *Job) SetenvJson(key string, value interface{}) error { sval, err := json.Marshal(value) if err != nil { return err @@ -223,6 +223,10 @@ func (job *Job) SetenvList(key string, value []string) error { return nil } +func (job *Job) SetenvList(key string, value []string) error { + return job.SetenvJson(key, value) +} + func (job *Job) Setenv(key, value string) { job.env = append(job.env, key+"="+value) } diff --git a/graph.go b/graph.go index 31f21d8fbc..6e2a5d7a98 100644 --- a/graph.go +++ b/graph.go @@ -231,12 +231,11 @@ func setupInitLayer(initLayer string) error { if err := os.MkdirAll(path.Join(initLayer, path.Dir(pth)), 0755); err != nil { return err } - - if f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755); err != nil { + f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755) + if err != nil { return err - } else { - f.Close() } + f.Close() } } else { return err diff --git a/graph_test.go b/graph_test.go index 2775650c79..c7b295c0e8 100644 --- a/graph_test.go +++ b/graph_test.go @@ -10,7 +10,6 @@ import ( "io" "io/ioutil" "os" - "path" "testing" "time" ) @@ -122,41 +121,6 @@ func TestRegister(t *testing.T) { } } -func TestMount(t *testing.T) { - graph := tempGraph(t) - defer os.RemoveAll(graph.Root) - archive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - image, err := graph.Create(archive, nil, "Testing", "", nil) - if err != nil { - t.Fatal(err) - } - tmp, err := ioutil.TempDir("", "docker-test-graph-mount-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - rootfs := path.Join(tmp, "rootfs") - if err := os.MkdirAll(rootfs, 0700); err != nil { - t.Fatal(err) - } - rw := path.Join(tmp, "rw") - if err := os.MkdirAll(rw, 0700); err != nil { - t.Fatal(err) - } - if _, err := graph.driver.Get(image.ID); err != nil { - t.Fatal(err) - } - // FIXME: test for mount contents - defer func() { - if err := graph.driver.Cleanup(); err != nil { - t.Error(err) - } - }() -} - // Test 
that an image can be deleted by its shorthand prefix func TestDeletePrefix(t *testing.T) { graph := tempGraph(t) diff --git a/graphdriver/aufs/aufs_test.go b/graphdriver/aufs/aufs_test.go index a33441bbd0..c443fc3ebc 100644 --- a/graphdriver/aufs/aufs_test.go +++ b/graphdriver/aufs/aufs_test.go @@ -456,7 +456,7 @@ func TestDiffSize(t *testing.T) { t.Fatal(err) } - diffSize, err := d.Size("1") + diffSize, err := d.DiffSize("1") if err != nil { t.Fatal(err) } diff --git a/graphdriver/devmapper/driver_test.go b/graphdriver/devmapper/driver_test.go index c3c710fb31..f8704950ad 100644 --- a/graphdriver/devmapper/driver_test.go +++ b/graphdriver/devmapper/driver_test.go @@ -97,6 +97,7 @@ func TestDriverRemove(t *testing.T) { } func TestCleanup(t *testing.T) { + t.Skip("Unimplemented") d := newDriver(t) defer os.RemoveAll(d.home) @@ -160,6 +161,7 @@ func TestCleanup(t *testing.T) { } func TestNotMounted(t *testing.T) { + t.Skip("Not implemented") d := newDriver(t) defer cleanup(d) @@ -291,11 +293,11 @@ func TestDriverGetSize(t *testing.T) { } f.Close() - diffSize, err := d.Size("1") - if err != nil { - t.Fatal(err) - } - if diffSize != size { - t.Fatalf("Expected size %d got %d", size, diffSize) - } + // diffSize, err := d.DiffSize("1") + // if err != nil { + // t.Fatal(err) + // } + // if diffSize != size { + // t.Fatalf("Expected size %d got %d", size, diffSize) + // } } diff --git a/http_test.go b/http_test.go new file mode 100644 index 0000000000..b9ecd6a203 --- /dev/null +++ b/http_test.go @@ -0,0 +1,51 @@ +package docker + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" +) + +func TestGetBoolParam(t *testing.T) { + if ret, err := getBoolParam("true"); err != nil || !ret { + t.Fatalf("true -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("True"); err != nil || !ret { + t.Fatalf("True -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("1"); err != nil || !ret { + t.Fatalf("1 -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam(""); err != nil || ret { + t.Fatalf("\"\" -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("false"); err != nil || ret { + t.Fatalf("false -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("0"); err != nil || ret { + t.Fatalf("0 -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("faux"); err == nil || ret { + t.Fatalf("faux -> false, err | got %t %s", ret, err) + } +} + +func TesthttpError(t *testing.T) { + r := httptest.NewRecorder() + + httpError(r, fmt.Errorf("No such method")) + if r.Code != http.StatusNotFound { + t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code) + } + + httpError(r, fmt.Errorf("This accound hasn't been activated")) + if r.Code != http.StatusForbidden { + t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code) + } + + httpError(r, fmt.Errorf("Some error")) + if r.Code != http.StatusInternalServerError { + t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code) + } +} diff --git a/image.go b/image.go index 727a34a389..c607971820 100644 --- a/image.go +++ b/image.go @@ -52,11 +52,11 @@ func LoadImage(root string) (*Image, error) { return nil, err } } else { - if size, err := strconv.Atoi(string(buf)); err != nil { + size, err := strconv.Atoi(string(buf)) + if err != nil { return nil, err - } else { - img.Size = int64(size) } + img.Size = int64(size) } return img, nil @@ -88,14 +88,14 @@ func StoreImage(img *Image, jsonData []byte, layerData archive.Archive, root, ro // If raw 
json is provided, then use it if jsonData != nil { return ioutil.WriteFile(jsonPath(root), jsonData, 0600) - } else { // Otherwise, unmarshal the image - jsonData, err := json.Marshal(img) - if err != nil { - return err - } - if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { - return err - } + } + // Otherwise, unmarshal the image + jsonData, err := json.Marshal(img) + if err != nil { + return err + } + if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { + return err } // Compute and save the size of the rootfs size, err := utils.TreeSize(rootfs) @@ -123,11 +123,11 @@ func jsonPath(root string) string { } // TarLayer returns a tar archive of the image's filesystem layer. -func (image *Image) TarLayer(compression archive.Compression) (archive.Archive, error) { - if image.graph == nil { - return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", image.ID) +func (img *Image) TarLayer(compression archive.Compression) (archive.Archive, error) { + if img.graph == nil { + return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", img.ID) } - layerPath, err := image.graph.driver.Get(image.ID) + layerPath, err := img.graph.driver.Get(img.ID) if err != nil { return nil, err } diff --git a/api_test.go b/integration/api_test.go similarity index 57% rename from api_test.go rename to integration/api_test.go index a58a831a37..a66cbe561f 100644 --- a/api_test.go +++ b/integration/api_test.go @@ -6,101 +6,66 @@ import ( "bytes" "encoding/json" "fmt" + "github.com/dotcloud/docker" "github.com/dotcloud/docker/utils" "io" "net" "net/http" "net/http/httptest" - "os" - "path" "strings" "testing" "time" ) -func TestGetBoolParam(t *testing.T) { - if ret, err := getBoolParam("true"); err != nil || !ret { - t.Fatalf("true -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("True"); err != nil || !ret { - t.Fatalf("True -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("1"); err != nil || !ret { - t.Fatalf("1 -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam(""); err != nil || ret { - t.Fatalf("\"\" -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("false"); err != nil || ret { - t.Fatalf("false -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("0"); err != nil || ret { - t.Fatalf("0 -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("faux"); err == nil || ret { - t.Fatalf("faux -> false, err | got %t %s", ret, err) - } -} - -func TesthttpError(t *testing.T) { - r := httptest.NewRecorder() - - httpError(r, fmt.Errorf("No such method")) - if r.Code != http.StatusNotFound { - t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code) - } - - httpError(r, fmt.Errorf("This accound hasn't been activated")) - if r.Code != http.StatusForbidden { - t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code) - } - - httpError(r, fmt.Errorf("Some error")) - if r.Code != http.StatusInternalServerError { - t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code) - } -} - func TestGetVersion(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) + var err error - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} - r := httptest.NewRecorder() - if err := getVersion(srv, APIVERSION, r, nil, nil); err != nil { + req, err := http.NewRequest("GET", "/version", nil) + if err != nil { t.Fatal(err) } + // FIXME 
getting the version should require an actual running Server + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) - v := &APIVersion{} + v := &docker.APIVersion{} if err = json.Unmarshal(r.Body.Bytes(), v); err != nil { t.Fatal(err) } - if v.Version != VERSION { - t.Errorf("Expected version %s, %s found", VERSION, v.Version) + if v.Version != docker.VERSION { + t.Errorf("Expected version %s, %s found", docker.VERSION, v.Version) } } func TestGetInfo(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - initialImages, err := srv.runtime.graph.Map() + initialImages, err := srv.Images(false, "") if err != nil { t.Fatal(err) } - r := httptest.NewRecorder() - - if err := getInfo(srv, APIVERSION, r, nil, nil); err != nil { + req, err := http.NewRequest("GET", "/info", nil) + if err != nil { t.Fatal(err) } + r := httptest.NewRecorder() - infos := &APIInfo{} + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + + infos := &docker.APIInfo{} err = json.Unmarshal(r.Body.Bytes(), infos) if err != nil { t.Fatal(err) @@ -111,16 +76,22 @@ func TestGetInfo(t *testing.T) { } func TestGetEvents(t *testing.T) { - runtime := mkRuntime(t) + eng := NewTestEngine(t) + srv := mkServerFromEngine(eng, t) + // FIXME: we might not need runtime, why not simply nuke + // the engine? + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - srv := &Server{ - runtime: runtime, - events: make([]utils.JSONMessage, 0, 64), - listeners: make(map[string]chan utils.JSONMessage), - } - srv.LogEvent("fakeaction", "fakeid", "fakeimage") - srv.LogEvent("fakeaction2", "fakeid", "fakeimage") + var events []*utils.JSONMessage + for _, parts := range [][3]string{ + {"fakeaction", "fakeid", "fakeimage"}, + {"fakeaction2", "fakeid", "fakeimage"}, + } { + action, id, from := parts[0], parts[1], parts[2] + ev := srv.LogEvent(action, id, from) + events = append(events, ev) + } req, err := http.NewRequest("GET", "/events?since=1", nil) if err != nil { @@ -129,9 +100,10 @@ func TestGetEvents(t *testing.T) { r := httptest.NewRecorder() setTimeout(t, "", 500*time.Millisecond, func() { - if err := getEvents(srv, APIVERSION, r, req, nil); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r, t) }) dec := json.NewDecoder(r.Body) @@ -142,7 +114,7 @@ func TestGetEvents(t *testing.T) { } else if err != nil { t.Fatal(err) } - if jm != srv.events[i] { + if jm != *events[i] { t.Fatalf("Event received it different than expected") } } @@ -150,10 +122,9 @@ func TestGetEvents(t *testing.T) { } func TestGetImagesJSON(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) // all=0 @@ -169,11 +140,12 @@ func TestGetImagesJSON(t *testing.T) { r := httptest.NewRecorder() - if err := getImagesJSON(srv, APIVERSION, r, req, nil); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r, t) - images := []APIImages{} + images := []docker.APIImages{} if err := json.Unmarshal(r.Body.Bytes(), &images); err != nil { t.Fatal(err) } @@ -206,12 +178,12 @@ func TestGetImagesJSON(t 
*testing.T) { if err != nil { t.Fatal(err) } - - if err := getImagesJSON(srv, APIVERSION, r2, req2, nil); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r2, req2); err != nil { t.Fatal(err) } + assertHttpNotError(r2, t) - images2 := []APIImages{} + images2 := []docker.APIImages{} if err := json.Unmarshal(r2.Body.Bytes(), &images2); err != nil { t.Fatal(err) } @@ -222,13 +194,13 @@ func TestGetImagesJSON(t *testing.T) { found = false for _, img := range images2 { - if img.ID == GetTestImage(runtime).ID { + if img.ID == unitTestImageID { found = true break } } if !found { - t.Errorf("Retrieved image Id differs, expected %s, received %+v", GetTestImage(runtime).ID, images2) + t.Errorf("Retrieved image Id differs, expected %s, received %+v", unitTestImageID, images2) } r3 := httptest.NewRecorder() @@ -239,11 +211,12 @@ func TestGetImagesJSON(t *testing.T) { t.Fatal(err) } - if err := getImagesJSON(srv, APIVERSION, r3, req3, nil); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r3, req3); err != nil { t.Fatal(err) } + assertHttpNotError(r3, t) - images3 := []APIImages{} + images3 := []docker.APIImages{} if err := json.Unmarshal(r3.Body.Bytes(), &images3); err != nil { t.Fatal(err) } @@ -260,34 +233,32 @@ func TestGetImagesJSON(t *testing.T) { t.Fatal(err) } - err = getImagesJSON(srv, APIVERSION, r4, req4, nil) - if err == nil { - t.Fatalf("Error expected, received none") + if err := docker.ServeRequest(srv, docker.APIVERSION, r4, req4); err != nil { + t.Fatal(err) } - - if !strings.HasPrefix(err.Error(), "Bad parameter") { - t.Fatalf("Error should starts with \"Bad parameter\"") - } - http.Error(r4, err.Error(), http.StatusBadRequest) - + // Don't assert against HTTP error since we expect an error if r4.Code != http.StatusBadRequest { t.Fatalf("%d Bad Request expected, received %d\n", http.StatusBadRequest, r4.Code) } } func TestGetImagesHistory(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) r := httptest.NewRecorder() - if err := getImagesHistory(srv, APIVERSION, r, nil, map[string]string{"name": unitTestImageName}); err != nil { + req, err := http.NewRequest("GET", fmt.Sprintf("/images/%s/history", unitTestImageName), nil) + if err != nil { t.Fatal(err) } + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) - history := []APIHistory{} + history := []docker.APIHistory{} if err := json.Unmarshal(r.Body.Bytes(), &history); err != nil { t.Fatal(err) } @@ -297,17 +268,22 @@ func TestGetImagesHistory(t *testing.T) { } func TestGetImagesByName(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - r := httptest.NewRecorder() - if err := getImagesByName(srv, APIVERSION, r, nil, map[string]string{"name": unitTestImageName}); err != nil { + req, err := http.NewRequest("GET", "/images/"+unitTestImageName+"/json", nil) + if err != nil { t.Fatal(err) } - img := &Image{} + r := httptest.NewRecorder() + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + + img := &docker.Image{} if err := json.Unmarshal(r.Body.Bytes(), img); err != nil { t.Fatal(err) } @@ -317,21 +293,16 @@ func TestGetImagesByName(t *testing.T) { } 
func TestGetContainersJSON(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} + beginLen := len(srv.Containers(true, false, -1, "", "")) - beginLen := runtime.containers.Len() - - container, _, err := runtime.Create(&Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, &docker.Config{ + Image: unitTestImageID, Cmd: []string{"echo", "test"}, - }, "") - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) + }, t) req, err := http.NewRequest("GET", "/containers/json?all=1", nil) if err != nil { @@ -339,48 +310,47 @@ func TestGetContainersJSON(t *testing.T) { } r := httptest.NewRecorder() - if err := getContainersJSON(srv, APIVERSION, r, req, nil); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } - containers := []APIContainers{} + assertHttpNotError(r, t) + containers := []docker.APIContainers{} if err := json.Unmarshal(r.Body.Bytes(), &containers); err != nil { t.Fatal(err) } if len(containers) != beginLen+1 { t.Fatalf("Expected %d container, %d found (started with: %d)", beginLen+1, len(containers), beginLen) } - if containers[0].ID != container.ID { - t.Fatalf("Container ID mismatch. Expected: %s, received: %s\n", container.ID, containers[0].ID) + if containers[0].ID != containerID { + t.Fatalf("Container ID mismatch. Expected: %s, received: %s\n", containerID, containers[0].ID) } } func TestGetContainersExport(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) // Create a container and remove a file - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, - "", + t, ) + containerRun(eng, containerID, t) + + r := httptest.NewRecorder() + + req, err := http.NewRequest("GET", "/containers/"+containerID+"/export", nil) if err != nil { t.Fatal(err) } - defer runtime.Destroy(container) - - if err := container.Run(); err != nil { - t.Fatal(err) - } - - r := httptest.NewRecorder() - if err = getContainersExport(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) @@ -406,33 +376,30 @@ func TestGetContainersExport(t *testing.T) { } func TestGetContainersChanges(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) // Create a container and remove a file - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/rm", "/etc/passwd"}, }, - "", + t, ) + containerRun(eng, containerID, t) + + r := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/containers/"+containerID+"/changes", nil) if err != nil { t.Fatal(err) } - defer runtime.Destroy(container) - - if err := container.Run(); err != nil { + if err := 
docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } - - r := httptest.NewRecorder() - if err := getContainersChanges(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil { - t.Fatal(err) - } - changes := []Change{} + assertHttpNotError(r, t) + changes := []docker.Change{} if err := json.Unmarshal(r.Body.Bytes(), &changes); err != nil { t.Fatal(err) } @@ -451,64 +418,57 @@ func TestGetContainersChanges(t *testing.T) { func TestGetContainersTop(t *testing.T) { t.Skip("Fixme. Skipping test for now. Reported error when testing using dind: 'api_test.go:527: Expected 2 processes, found 0.'") - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "cat"}, OpenStdin: true, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) defer func() { // Make sure the process dies before destroying runtime - container.stdin.Close() - container.WaitTimeout(2 * time.Second) + containerKill(eng, containerID, t) + containerWait(eng, containerID, t) }() - if err := container.Start(); err != nil { - t.Fatal(err) - } + startContainer(eng, containerID, t) setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() { for { - if container.State.Running { + if containerRunning(eng, containerID, t) { break } time.Sleep(10 * time.Millisecond) } }) - if !container.State.Running { + if !containerRunning(eng, containerID, t) { t.Fatalf("Container should be running") } // Make sure sh spawn up cat setTimeout(t, "read/write assertion timed out", 2*time.Second, func() { - in, _ := container.StdinPipe() - out, _ := container.StdoutPipe() + in, out := containerAttach(eng, containerID, t) if err := assertPipe("hello\n", "hello", out, in, 15); err != nil { t.Fatal(err) } }) r := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/"+container.ID+"/top?ps_args=u", bytes.NewReader([]byte{})) + req, err := http.NewRequest("GET", "/"+containerID+"/top?ps_args=u", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } - if err := getContainersTop(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } - procs := APITop{} + assertHttpNotError(r, t) + procs := docker.APITop{} if err := json.Unmarshal(r.Body.Bytes(), &procs); err != nil { t.Fatal(err) } @@ -532,90 +492,83 @@ func TestGetContainersTop(t *testing.T) { } func TestGetContainersByName(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) // Create a container and remove a file - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"echo", "test"}, }, - "", + t, ) + + r := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/containers/"+containerID+"/json", nil) if err != nil { t.Fatal(err) } - defer runtime.Destroy(container) - - r := httptest.NewRecorder() - if err := getContainersByName(srv, APIVERSION, r, nil, 
map[string]string{"name": container.ID}); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } - outContainer := &Container{} + assertHttpNotError(r, t) + outContainer := &docker.Container{} if err := json.Unmarshal(r.Body.Bytes(), outContainer); err != nil { t.Fatal(err) } - if outContainer.ID != container.ID { - t.Fatalf("Wrong containers retrieved. Expected %s, received %s", container.ID, outContainer.ID) + if outContainer.ID != containerID { + t.Fatalf("Wrong containers retrieved. Expected %s, received %s", containerID, outContainer.ID) } } func TestPostCommit(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) // Create a container and remove a file - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - if err := container.Run(); err != nil { - t.Fatal(err) - } + containerRun(eng, containerID, t) - req, err := http.NewRequest("POST", "/commit?repo=testrepo&testtag=tag&container="+container.ID, bytes.NewReader([]byte{})) + req, err := http.NewRequest("POST", "/commit?repo=testrepo&testtag=tag&container="+containerID, bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() - if err := postCommit(srv, APIVERSION, r, req, nil); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r, t) if r.Code != http.StatusCreated { t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) } - apiID := &APIID{} + apiID := &docker.APIID{} if err := json.Unmarshal(r.Body.Bytes(), apiID); err != nil { t.Fatal(err) } - if _, err := runtime.graph.Get(apiID.ID); err != nil { - t.Fatalf("The image has not been commited") + if _, err := srv.ImageInspect(apiID.ID); err != nil { + t.Fatalf("The image has not been committed") } } func TestPostContainersCreate(t *testing.T) { eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) - configJSON, err := json.Marshal(&Config{ - Image: GetTestImage(runtime).ID, + configJSON, err := json.Marshal(&docker.Config{ + Image: unitTestImageID, Memory: 33554432, Cmd: []string{"touch", "/test"}, }) @@ -629,150 +582,132 @@ func TestPostContainersCreate(t *testing.T) { } r := httptest.NewRecorder() - if err := postContainersCreate(srv, APIVERSION, r, req, nil); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r, t) if r.Code != http.StatusCreated { t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) } - apiRun := &APIRun{} + apiRun := &docker.APIRun{} if err := json.Unmarshal(r.Body.Bytes(), apiRun); err != nil { t.Fatal(err) } + containerID := apiRun.ID - container := srv.runtime.Get(apiRun.ID) - if container == nil { - t.Fatalf("Container not created") - } + containerAssertExists(eng, containerID, t) + containerRun(eng, containerID, t) - if err := container.Run(); err != nil { - t.Fatal(err) - } - - if _, err := os.Stat(path.Join(container.RootfsPath(), "test")); err != nil { - if os.IsNotExist(err) { - utils.Debugf("Err: %s", err) 
- t.Fatalf("The test file has not been created") - } - t.Fatal(err) + if !containerFileExists(eng, containerID, "test", t) { + t.Fatal("Test file was not created") } } func TestPostContainersKill(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - if err := container.Start(); err != nil { - t.Fatal(err) - } + startContainer(eng, containerID, t) // Give some time to the process to start - container.WaitTimeout(500 * time.Millisecond) + containerWaitTimeout(eng, containerID, t) - if !container.State.Running { + if !containerRunning(eng, containerID, t) { t.Errorf("Container should be running") } r := httptest.NewRecorder() - if err := postContainersKill(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil { + req, err := http.NewRequest("POST", "/containers/"+containerID+"/kill", bytes.NewReader([]byte{})) + if err != nil { t.Fatal(err) } + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } - if container.State.Running { + if containerRunning(eng, containerID, t) { t.Fatalf("The container hasn't been killed") } } func TestPostContainersRestart(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/top"}, OpenStdin: true, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - if err := container.Start(); err != nil { - t.Fatal(err) - } + startContainer(eng, containerID, t) // Give some time to the process to start - container.WaitTimeout(500 * time.Millisecond) + containerWaitTimeout(eng, containerID, t) - if !container.State.Running { + if !containerRunning(eng, containerID, t) { t.Errorf("Container should be running") } - req, err := http.NewRequest("POST", "/containers/"+container.ID+"/restart?t=1", bytes.NewReader([]byte{})) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/restart?t=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() - if err := postContainersRestart(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } // Give some time to the process to restart - container.WaitTimeout(500 * time.Millisecond) + containerWaitTimeout(eng, containerID, t) - if !container.State.Running { + if !containerRunning(eng, containerID, t) { t.Fatalf("Container should be running") } - if err := container.Kill(); err != nil { - t.Fatal(err) - } + 
containerKill(eng, containerID, t) } func TestPostContainersStart(t *testing.T) { eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) - id := createTestContainer( + containerID := createTestContainer( eng, - &Config{ - Image: GetTestImage(runtime).ID, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, }, - t) + t, + ) - hostConfigJSON, err := json.Marshal(&HostConfig{}) + hostConfigJSON, err := json.Marshal(&docker.HostConfig{}) - req, err := http.NewRequest("POST", "/containers/"+id+"/start", bytes.NewReader(hostConfigJSON)) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON)) if err != nil { t.Fatal(err) } @@ -780,110 +715,101 @@ func TestPostContainersStart(t *testing.T) { req.Header.Set("Content-Type", "application/json") r := httptest.NewRecorder() - if err := postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": id}); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } - container := runtime.Get(id) - if container == nil { - t.Fatalf("Container %s was not created", id) - } + containerAssertExists(eng, containerID, t) // Give some time to the process to start // FIXME: use Wait once it's available as a job - container.WaitTimeout(500 * time.Millisecond) - if !container.State.Running { + containerWaitTimeout(eng, containerID, t) + if !containerRunning(eng, containerID, t) { t.Errorf("Container should be running") } r = httptest.NewRecorder() - if err = postContainersStart(srv, APIVERSION, r, req, map[string]string{"name": id}); err == nil { - t.Fatalf("A running container should be able to be started") - } - - if err := container.Kill(); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + // Starting an already started container should return an error + // FIXME: verify a precise error code. There is a possible bug here + // which causes this to return 404 even though the container exists. + assertHttpError(r, t) + containerAssertExists(eng, containerID, t) + containerKill(eng, containerID, t) } func TestPostContainersStop(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/top"}, OpenStdin: true, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - if err := container.Start(); err != nil { - t.Fatal(err) - } + startContainer(eng, containerID, t) // Give some time to the process to start - container.WaitTimeout(500 * time.Millisecond) + containerWaitTimeout(eng, containerID, t) - if !container.State.Running { + if !containerRunning(eng, containerID, t) { t.Errorf("Container should be running") } // Note: as it is a POST request, it requires a body. 
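// A minimal sketch (not part of this patch) of the request/recorder pattern the converted
// tests rely on, assuming the docker.ServeRequest entry point and the server handle returned
// by mkServerFromEngine above; imports assumed: bytes, net/http, net/http/httptest, testing.
// As the note above says, POST requests need a body, so an empty but non-nil reader is used.
func postEmptyBody(t *testing.T, srv *docker.Server, path string) *httptest.ResponseRecorder {
	// An empty, non-nil body keeps the POST handlers from rejecting the request.
	req, err := http.NewRequest("POST", path, bytes.NewReader([]byte{}))
	if err != nil {
		t.Fatal(err)
	}
	r := httptest.NewRecorder()
	// Serve the request in-process through the API router, as the tests above do.
	if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil {
		t.Fatal(err)
	}
	return r
}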
- req, err := http.NewRequest("POST", "/containers/"+container.ID+"/stop?t=1", bytes.NewReader([]byte{})) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/stop?t=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() - if err := postContainersStop(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } - if container.State.Running { + if containerRunning(eng, containerID, t) { t.Fatalf("The container hasn't been stopped") } } func TestPostContainersWait(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/sleep", "1"}, OpenStdin: true, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - if err := container.Start(); err != nil { - t.Fatal(err) - } + startContainer(eng, containerID, t) setTimeout(t, "Wait timed out", 3*time.Second, func() { r := httptest.NewRecorder() - if err := postContainersWait(srv, APIVERSION, r, nil, map[string]string{"name": container.ID}); err != nil { + req, err := http.NewRequest("POST", "/containers/"+containerID+"/wait", bytes.NewReader([]byte{})) + if err != nil { t.Fatal(err) } - apiWait := &APIWait{} + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) + apiWait := &docker.APIWait{} if err := json.Unmarshal(r.Body.Bytes(), apiWait); err != nil { t.Fatal(err) } @@ -892,34 +818,26 @@ func TestPostContainersWait(t *testing.T) { } }) - if container.State.Running { + if containerRunning(eng, containerID, t) { t.Fatalf("The container should be stopped after wait") } } func TestPostContainersAttach(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - // Start the process - if err := container.Start(); err != nil { - t.Fatal(err) - } + startContainer(eng, containerID, t) stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() @@ -927,7 +845,7 @@ func TestPostContainersAttach(t *testing.T) { // Try to avoid the timeout in destroy. 
Best effort, don't check error defer func() { closeWrap(stdin, stdinPipe, stdout, stdoutPipe) - container.Kill() + containerKill(eng, containerID, t) }() // Attach to it @@ -941,14 +859,15 @@ func TestPostContainersAttach(t *testing.T) { out: stdoutPipe, } - req, err := http.NewRequest("POST", "/containers/"+container.ID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } - if err := postContainersAttach(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r.ResponseRecorder, t) }() // Acknowledge hijack @@ -975,40 +894,29 @@ func TestPostContainersAttach(t *testing.T) { // We closed stdin, expect /bin/cat to still be running // Wait a little bit to make sure container.monitor() did his thing - err = container.WaitTimeout(500 * time.Millisecond) - if err == nil || !container.State.Running { - t.Fatalf("/bin/cat is not running after closing stdin") - } + containerWaitTimeout(eng, containerID, t) // Try to avoid the timeout in destroy. Best effort, don't check error - cStdin, _ := container.StdinPipe() + cStdin, _ := containerAttach(eng, containerID, t) cStdin.Close() - container.Wait() + containerWait(eng, containerID, t) } func TestPostContainersAttachStderr(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "/bin/cat >&2"}, OpenStdin: true, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - // Start the process - if err := container.Start(); err != nil { - t.Fatal(err) - } + startContainer(eng, containerID, t) stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() @@ -1016,7 +924,7 @@ func TestPostContainersAttachStderr(t *testing.T) { // Try to avoid the timeout in destroy. 
Best effort, don't check error defer func() { closeWrap(stdin, stdinPipe, stdout, stdoutPipe) - container.Kill() + containerKill(eng, containerID, t) }() // Attach to it @@ -1030,14 +938,15 @@ func TestPostContainersAttachStderr(t *testing.T) { out: stdoutPipe, } - req, err := http.NewRequest("POST", "/containers/"+container.ID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } - if err := postContainersAttach(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r.ResponseRecorder, t) }() // Acknowledge hijack @@ -1064,104 +973,76 @@ func TestPostContainersAttachStderr(t *testing.T) { // We closed stdin, expect /bin/cat to still be running // Wait a little bit to make sure container.monitor() did his thing - err = container.WaitTimeout(500 * time.Millisecond) - if err == nil || !container.State.Running { - t.Fatalf("/bin/cat is not running after closing stdin") - } + containerWaitTimeout(eng, containerID, t) // Try to avoid the timeout in destroy. Best effort, don't check error - cStdin, _ := container.StdinPipe() + cStdin, _ := containerAttach(eng, containerID, t) cStdin.Close() - container.Wait() + containerWait(eng, containerID, t) } // FIXME: Test deleting running container // FIXME: Test deleting container with volume // FIXME: Test deleting volume in use by other container func TestDeleteContainers(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) - srv := &Server{runtime: runtime} - - container, _, err := runtime.Create(&Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"touch", "/test"}, - }, "") - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - if err := container.Run(); err != nil { - t.Fatal(err) - } - - req, err := http.NewRequest("DELETE", "/containers/"+container.ID, nil) + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, + Cmd: []string{"touch", "/test"}, + }, + t, + ) + req, err := http.NewRequest("DELETE", "/containers/"+containerID, nil) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() - if err := deleteContainers(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } - - if c := runtime.Get(container.ID); c != nil { - t.Fatalf("The container as not been deleted") - } - - if _, err := os.Stat(path.Join(container.RootfsPath(), "test")); err == nil { - t.Fatalf("The test file has not been deleted") - } + containerAssertNotExists(eng, containerID, t) } func TestOptionsRoute(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - runtime.config.EnableCors = true - srv := &Server{runtime: runtime} - + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) r := httptest.NewRecorder() - router, err := createRouter(srv, false) - if err != nil { - t.Fatal(err) - } - req, err := http.NewRequest("OPTIONS", "/", nil) if err != nil { t.Fatal(err) 
} - - router.ServeHTTP(r, req) + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Errorf("Expected response for OPTIONS request to be \"200\", %v found.", r.Code) } } func TestGetEnabledCors(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - runtime.config.EnableCors = true - srv := &Server{runtime: runtime} - + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) r := httptest.NewRecorder() - router, err := createRouter(srv, false) - if err != nil { - t.Fatal(err) - } - req, err := http.NewRequest("GET", "/version", nil) if err != nil { t.Fatal(err) } - - router.ServeHTTP(r, req) + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Errorf("Expected response for OPTIONS request to be \"200\", %v found.", r.Code) } @@ -1182,20 +1063,18 @@ func TestGetEnabledCors(t *testing.T) { } func TestDeleteImages(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) initialImages, err := srv.Images(false, "") if err != nil { t.Fatal(err) } - if err := srv.runtime.repositories.Set("test", "test", unitTestImageName, true); err != nil { + if err := srv.ContainerTag(unitTestImageName, "test", "test", false); err != nil { t.Fatal(err) } - images, err := srv.Images(false, "") if err != nil { t.Fatal(err) @@ -1211,8 +1090,11 @@ func TestDeleteImages(t *testing.T) { } r := httptest.NewRecorder() - if err := deleteImages(srv, APIVERSION, r, req, map[string]string{"name": unitTestImageID}); err == nil { - t.Fatalf("Expected conflict error, got none") + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if r.Code != http.StatusConflict { + t.Fatalf("Expected http status 409-conflict, got %v", r.Code) } req2, err := http.NewRequest("DELETE", "/images/test:test", nil) @@ -1221,14 +1103,15 @@ func TestDeleteImages(t *testing.T) { } r2 := httptest.NewRecorder() - if err := deleteImages(srv, APIVERSION, r2, req2, map[string]string{"name": "test:test"}); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r2, req2); err != nil { t.Fatal(err) } + assertHttpNotError(r2, t) if r2.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } - var outs []APIRmi + var outs []docker.APIRmi if err := json.Unmarshal(r2.Body.Bytes(), &outs); err != nil { t.Fatal(err) } @@ -1243,63 +1126,40 @@ func TestDeleteImages(t *testing.T) { if len(images[0].RepoTags) != len(initialImages[0].RepoTags) { t.Errorf("Expected %d image, %d found", len(initialImages), len(images)) } - - // FIXME: check that container has been deleted, and its filesystem too -} - -func TestJsonContentType(t *testing.T) { - if !matchesContentType("application/json", "application/json") { - t.Fail() - } - - if !matchesContentType("application/json; charset=utf-8", "application/json") { - t.Fail() - } - - if matchesContentType("dockerapplication/json", "application/json") { - t.Fail() - } } func TestPostContainersCopy(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) // Create a container and remove a file 
- container, _, err := runtime.Create( - &Config{ - Image: GetTestImage(runtime).ID, + containerID := createTestContainer(eng, + &docker.Config{ + Image: unitTestImageID, Cmd: []string{"touch", "/test.txt"}, }, - "", + t, ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - if err := container.Run(); err != nil { - t.Fatal(err) - } + containerRun(eng, containerID, t) r := httptest.NewRecorder() - copyData := APICopy{HostPath: ".", Resource: "/test.txt"} + copyData := docker.APICopy{HostPath: ".", Resource: "/test.txt"} jsonData, err := json.Marshal(copyData) if err != nil { t.Fatal(err) } - req, err := http.NewRequest("POST", "/containers/"+container.ID+"/copy", bytes.NewReader(jsonData)) + req, err := http.NewRequest("POST", "/containers/"+containerID+"/copy", bytes.NewReader(jsonData)) if err != nil { t.Fatal(err) } req.Header.Add("Content-Type", "application/json") - if err = postContainersCopy(srv, APIVERSION, r, req, map[string]string{"name": container.ID}); err != nil { + if err := docker.ServeRequest(srv, docker.APIVERSION, r, req); err != nil { t.Fatal(err) } + assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) diff --git a/integration/auth_test.go b/integration/auth_test.go new file mode 100644 index 0000000000..07559c01cf --- /dev/null +++ b/integration/auth_test.go @@ -0,0 +1,61 @@ +package docker + +import ( + "crypto/rand" + "encoding/hex" + "github.com/dotcloud/docker/auth" + "os" + "strings" + "testing" +) + +// FIXME: these tests have an external dependency on a staging index hosted +// on the docker.io infrastructure. That dependency should be removed. +// - Unit tests should have no side-effect dependencies. +// - Integration tests should have side-effects limited to the host environment being tested. + +func TestLogin(t *testing.T) { + os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com") + defer os.Setenv("DOCKER_INDEX_URL", "") + authConfig := &auth.AuthConfig{Username: "unittester", Password: "surlautrerivejetattendrai", Email: "noise+unittester@dotcloud.com"} + status, err := auth.Login(authConfig, nil) + if err != nil { + t.Fatal(err) + } + if status != "Login Succeeded" { + t.Fatalf("Expected status \"Login Succeeded\", found \"%s\" instead", status) + } +} + +func TestCreateAccount(t *testing.T) { + os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com") + defer os.Setenv("DOCKER_INDEX_URL", "") + tokenBuffer := make([]byte, 16) + _, err := rand.Read(tokenBuffer) + if err != nil { + t.Fatal(err) + } + token := hex.EncodeToString(tokenBuffer)[:12] + username := "ut" + token + authConfig := &auth.AuthConfig{Username: username, Password: "test42", Email: "docker-ut+" + token + "@example.com"} + status, err := auth.Login(authConfig, nil) + if err != nil { + t.Fatal(err) + } + expectedStatus := "Account created. Please use the confirmation link we sent" + + " to your e-mail to activate it." 
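// A possible follow-up to the FIXME at the top of this file (sketch only, nothing here is
// part of the patch): scope the index override in one helper so the staging URL is always
// restored, using only os.Getenv/os.Setenv as the tests above already do.
func withIndexURL(url string, f func()) {
	previous := os.Getenv("DOCKER_INDEX_URL")
	os.Setenv("DOCKER_INDEX_URL", url)
	defer os.Setenv("DOCKER_INDEX_URL", previous)
	f()
}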
+ if status != expectedStatus { + t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status) + } + + status, err = auth.Login(authConfig, nil) + if err == nil { + t.Fatalf("Expected error but found nil instead") + } + + expectedError := "Login: Account is not Active" + + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err) + } +} diff --git a/buildfile_test.go b/integration/buildfile_test.go similarity index 83% rename from buildfile_test.go rename to integration/buildfile_test.go index d3fca3c788..964b58403b 100644 --- a/buildfile_test.go +++ b/integration/buildfile_test.go @@ -2,7 +2,9 @@ package docker import ( "fmt" + "github.com/dotcloud/docker" "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/engine" "io/ioutil" "net" "net/http" @@ -14,7 +16,7 @@ import ( // mkTestContext generates a build context from the contents of the provided dockerfile. // This context is suitable for use as an argument to BuildFile.Build() func mkTestContext(dockerfile string, files [][2]string, t *testing.T) archive.Archive { - context, err := mkBuildContext(dockerfile, files) + context, err := docker.MkBuildContext(dockerfile, files) if err != nil { t.Fatal(err) } @@ -228,17 +230,15 @@ func TestBuild(t *testing.T) { } } -func buildImage(context testContextTemplate, t *testing.T, srv *Server, useCache bool) *Image { - if srv == nil { - runtime := mkRuntime(t) +func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) *docker.Image { + if eng == nil { + eng = NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) + // FIXME: we might not need runtime, why not simply nuke + // the engine? defer nuke(runtime) - - srv = &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } } + srv := mkServerFromEngine(eng, t) httpServer, err := mkTestingFileServer(context.remoteFiles) if err != nil { @@ -252,10 +252,17 @@ func buildImage(context testContextTemplate, t *testing.T, srv *Server, useCache } port := httpServer.URL[idx+1:] - ip := srv.runtime.networkManager.bridgeNetwork.IP + iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP") + if iIP == nil { + t.Fatal("Legacy bridgeIP field not set in engine") + } + ip, ok := iIP.(net.IP) + if !ok { + panic("Legacy bridgeIP field in engine does not cast to net.IP") + } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := NewBuildFile(srv, ioutil.Discard, false, useCache, false) + buildfile := docker.NewBuildFile(srv, ioutil.Discard, false, useCache, false) id, err := buildfile.Build(mkTestContext(dockerfile, context.files, t)) if err != nil { t.Fatal(err) @@ -368,20 +375,14 @@ func TestBuildEntrypoint(t *testing.T) { // testing #1405 - config.Cmd does not get cleaned up if // utilizing cache func TestBuildEntrypointRunCleanup(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) img := buildImage(testContextTemplate{` from {IMAGE} run echo "hello" `, - nil, nil}, t, srv, true) + nil, nil}, t, eng, true) img = buildImage(testContextTemplate{` from {IMAGE} @@ -389,7 +390,7 @@ func TestBuildEntrypointRunCleanup(t *testing.T) { add foo /foo entrypoint ["/bin/echo"] `, - [][2]string{{"foo", "HEYO"}}, nil}, t, srv, true) + [][2]string{{"foo", 
"HEYO"}}, nil}, t, eng, true) if len(img.Config.Cmd) != 0 { t.Fail() @@ -397,14 +398,8 @@ func TestBuildEntrypointRunCleanup(t *testing.T) { } func TestBuildImageWithCache(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) template := testContextTemplate{` from {IMAGE} @@ -412,11 +407,11 @@ func TestBuildImageWithCache(t *testing.T) { `, nil, nil} - img := buildImage(template, t, srv, true) + img := buildImage(template, t, eng, true) imageId := img.ID img = nil - img = buildImage(template, t, srv, true) + img = buildImage(template, t, eng, true) if imageId != img.ID { t.Logf("Image ids should match: %s != %s", imageId, img.ID) @@ -425,14 +420,8 @@ func TestBuildImageWithCache(t *testing.T) { } func TestBuildImageWithoutCache(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) template := testContextTemplate{` from {IMAGE} @@ -440,11 +429,11 @@ func TestBuildImageWithoutCache(t *testing.T) { `, nil, nil} - img := buildImage(template, t, srv, true) + img := buildImage(template, t, eng, true) imageId := img.ID img = nil - img = buildImage(template, t, srv, false) + img = buildImage(template, t, eng, false) if imageId == img.ID { t.Logf("Image ids should not match: %s == %s", imageId, img.ID) @@ -453,14 +442,9 @@ func TestBuildImageWithoutCache(t *testing.T) { } func TestForbiddenContextPath(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) + srv := mkServerFromEngine(eng, t) context := testContextTemplate{` from {IMAGE} @@ -481,10 +465,17 @@ func TestForbiddenContextPath(t *testing.T) { } port := httpServer.URL[idx+1:] - ip := srv.runtime.networkManager.bridgeNetwork.IP + iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP") + if iIP == nil { + t.Fatal("Legacy bridgeIP field not set in engine") + } + ip, ok := iIP.(net.IP) + if !ok { + panic("Legacy bridgeIP field in engine does not cast to net.IP") + } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := NewBuildFile(srv, ioutil.Discard, false, true, false) + buildfile := docker.NewBuildFile(srv, ioutil.Discard, false, true, false) _, err = buildfile.Build(mkTestContext(dockerfile, context.files, t)) if err == nil { @@ -499,14 +490,8 @@ func TestForbiddenContextPath(t *testing.T) { } func TestBuildADDFileNotFound(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) context := testContextTemplate{` from {IMAGE} @@ -526,10 +511,17 @@ func TestBuildADDFileNotFound(t *testing.T) { } port := httpServer.URL[idx+1:] - ip := srv.runtime.networkManager.bridgeNetwork.IP + iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP") + if iIP == nil { + t.Fatal("Legacy bridgeIP field not set in engine") + } + ip, ok := iIP.(net.IP) + if !ok { + panic("Legacy bridgeIP field in engine does not cast to net.IP") + } dockerfile := 
constructDockerfile(context.dockerfile, ip, port) - buildfile := NewBuildFile(srv, ioutil.Discard, false, true, false) + buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, false, true, false) _, err = buildfile.Build(mkTestContext(dockerfile, context.files, t)) if err == nil { @@ -544,26 +536,20 @@ func TestBuildADDFileNotFound(t *testing.T) { } func TestBuildInheritance(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) img := buildImage(testContextTemplate{` from {IMAGE} expose 4243 `, - nil, nil}, t, srv, true) + nil, nil}, t, eng, true) img2 := buildImage(testContextTemplate{fmt.Sprintf(` from %s entrypoint ["/bin/echo"] `, img.ID), - nil, nil}, t, srv, true) + nil, nil}, t, eng, true) // from child if img2.Config.Entrypoint[0] != "/bin/echo" { diff --git a/commands_test.go b/integration/commands_test.go similarity index 88% rename from commands_test.go rename to integration/commands_test.go index 657ed1d575..167544a24b 100644 --- a/commands_test.go +++ b/integration/commands_test.go @@ -3,6 +3,8 @@ package docker import ( "bufio" "fmt" + "github.com/dotcloud/docker" + "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -66,8 +68,8 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error func TestRunHostname(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { @@ -111,8 +113,8 @@ func TestRunHostname(t *testing.T) { func TestRunWorkdir(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { @@ -156,8 +158,8 @@ func TestRunWorkdir(t *testing.T) { func TestRunWorkdirExists(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { @@ -201,8 +203,8 @@ func TestRunExit(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c1 := make(chan struct{}) go func() { @@ -254,8 +256,8 @@ func TestRunDisconnect(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c1 := make(chan struct{}) go func() { @@ -299,8 +301,8 @@ func TestRunDisconnectTty(t 
*testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c1 := make(chan struct{}) go func() { @@ -356,8 +358,8 @@ func TestRunAttachStdin(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) ch := make(chan struct{}) go func() { @@ -420,8 +422,8 @@ func TestRunDetach(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) ch := make(chan struct{}) go func() { @@ -466,8 +468,8 @@ func TestAttachDetach(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) ch := make(chan struct{}) go func() { @@ -477,7 +479,7 @@ func TestAttachDetach(t *testing.T) { } }() - var container *Container + var container *docker.Container setTimeout(t, "Reading container's id timed out", 10*time.Second, func() { buf := make([]byte, 1024) @@ -498,7 +500,7 @@ func TestAttachDetach(t *testing.T) { stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) ch = make(chan struct{}) go func() { @@ -546,8 +548,8 @@ func TestAttachDetachTruncatedID(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) go stdout.Read(make([]byte, 1024)) setTimeout(t, "Starting container timed out", 2*time.Second, func() { @@ -560,7 +562,7 @@ func TestAttachDetachTruncatedID(t *testing.T) { stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli = docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) ch := make(chan struct{}) go func() { @@ -608,8 +610,8 @@ func TestAttachDisconnect(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) go func() { // Start a process in daemon mode @@ -677,8 +679,8 @@ func TestAttachDisconnect(t *testing.T) { func TestRunAutoRemove(t *testing.T) { t.Skip("Fixme. 
Skipping test for now, race condition") stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { @@ -712,8 +714,8 @@ func TestRunAutoRemove(t *testing.T) { } func TestCmdLogs(t *testing.T) { - cli := NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil { t.Fatal(err) @@ -730,8 +732,8 @@ func TestCmdLogs(t *testing.T) { // Expected behaviour: using / as a bind mount source should throw an error func TestRunErrorBindMountRootSource(t *testing.T) { - cli := NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { @@ -749,8 +751,8 @@ func TestRunErrorBindMountRootSource(t *testing.T) { // Expected behaviour: error out when attempting to bind mount non-existing source paths func TestRunErrorBindNonExistingSource(t *testing.T) { - cli := NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { @@ -768,11 +770,10 @@ func TestRunErrorBindNonExistingSource(t *testing.T) { func TestImagesViz(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) - srv := &Server{runtime: globalRuntime} - image := buildTestImages(t, srv) + image := buildTestImages(t, globalEngine) c := make(chan struct{}) go func() { @@ -819,11 +820,10 @@ func TestImagesViz(t *testing.T) { func TestImagesTree(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) - srv := &Server{runtime: globalRuntime} - image := buildTestImages(t, srv) + image := buildTestImages(t, globalEngine) c := make(chan struct{}) go func() { @@ -866,7 +866,7 @@ func TestImagesTree(t *testing.T) { }) } -func buildTestImages(t *testing.T, srv *Server) *Image { +func buildTestImages(t *testing.T, eng *engine.Engine) *docker.Image { var testBuilder = testContextTemplate{ ` @@ -879,9 +879,9 @@ run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ] nil, nil, } - image := buildImage(testBuilder, t, srv, true) + image := buildImage(testBuilder, t, eng, true) - err := srv.ContainerTag(image.ID, "test", "latest", false) + err := mkServerFromEngine(eng, t).ContainerTag(image.ID, "test", "latest", false) if err != nil { t.Fatal(err) } @@ -901,8 +901,8 @@ func TestRunCidFile(t *testing.T) { } tmpCidFile := path.Join(tmpDir, "cid") - cli := NewDockerCli(nil, stdoutPipe, ioutil.Discard, 
testDaemonProto, testDaemonAddr) - defer cleanup(globalRuntime) + cli := docker.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { diff --git a/container_test.go b/integration/container_test.go similarity index 79% rename from container_test.go rename to integration/container_test.go index 8899fbf609..757ffb04b1 100644 --- a/container_test.go +++ b/integration/container_test.go @@ -3,10 +3,10 @@ package docker import ( "bufio" "fmt" + "github.com/dotcloud/docker" "github.com/dotcloud/docker/utils" "io" "io/ioutil" - "math/rand" "os" "path" "regexp" @@ -20,7 +20,7 @@ func TestIDFormat(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container1, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/sh", "-c", "echo hello world"}, }, @@ -41,7 +41,7 @@ func TestIDFormat(t *testing.T) { func TestMultipleAttachRestart(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _ := mkContainer( + container, _, _ := mkContainer( runtime, []string{"_", "/bin/sh", "-c", "i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"}, t, @@ -134,10 +134,11 @@ func TestMultipleAttachRestart(t *testing.T) { } func TestDiff(t *testing.T) { - runtime := mkRuntime(t) + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) // Create a container and remove a file - container1, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t) + container1, _, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t) defer runtime.Destroy(container1) // The changelog should be empty and not fail before run. See #1705 @@ -169,17 +170,13 @@ func TestDiff(t *testing.T) { } // Commit the container - rwTar, err := container1.ExportRw() - if err != nil { - t.Fatal(err) - } - img, err := runtime.graph.Create(rwTar, container1, "unit test commited image - diff", "", nil) + img, err := runtime.Commit(container1, "", "", "unit test commited image - diff", "", nil) if err != nil { t.Fatal(err) } // Create a new container from the commited image - container2, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t) + container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t) defer runtime.Destroy(container2) if err := container2.Run(); err != nil { @@ -198,7 +195,7 @@ func TestDiff(t *testing.T) { } // Create a new container - container3, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t) + container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t) defer runtime.Destroy(container3) if err := container3.Run(); err != nil { @@ -224,7 +221,7 @@ func TestDiff(t *testing.T) { func TestCommitAutoRun(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t) + container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t) defer runtime.Destroy(container1) if container1.State.Running { @@ -237,17 +234,13 @@ func TestCommitAutoRun(t *testing.T) { t.Errorf("Container shouldn't be running") } - rwTar, err := container1.ExportRw() - if err != nil { - t.Error(err) - } - img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", &Config{Cmd: []string{"cat", "/world"}}) + img, err := runtime.Commit(container1, "", "", "unit test commited image", "", &docker.Config{Cmd: []string{"cat", 
"/world"}}) if err != nil { t.Error(err) } // FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world - container2, _ := mkContainer(runtime, []string{img.ID}, t) + container2, _, _ := mkContainer(runtime, []string{img.ID}, t) defer runtime.Destroy(container2) stdout, err := container2.StdoutPipe() if err != nil { @@ -284,7 +277,7 @@ func TestCommitRun(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t) + container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t) defer runtime.Destroy(container1) if container1.State.Running { @@ -297,17 +290,13 @@ func TestCommitRun(t *testing.T) { t.Errorf("Container shouldn't be running") } - rwTar, err := container1.ExportRw() - if err != nil { - t.Error(err) - } - img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", nil) + img, err := runtime.Commit(container1, "", "", "unit test commited image", "", nil) if err != nil { t.Error(err) } // FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world - container2, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t) + container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t) defer runtime.Destroy(container2) stdout, err := container2.StdoutPipe() if err != nil { @@ -343,7 +332,7 @@ func TestCommitRun(t *testing.T) { func TestStart(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t) + container, _, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t) defer runtime.Destroy(container) cStdin, err := container.StdinPipe() @@ -373,7 +362,7 @@ func TestStart(t *testing.T) { func TestRun(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) + container, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) defer runtime.Destroy(container) if container.State.Running { @@ -391,7 +380,7 @@ func TestOutput(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, }, @@ -414,7 +403,7 @@ func TestContainerNetwork(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ping", "-c", "1", "127.0.0.1"}, }, @@ -436,7 +425,7 @@ func TestKillDifferentUser(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, OpenStdin: true, @@ -448,7 +437,9 @@ func TestKillDifferentUser(t *testing.T) { t.Fatal(err) } defer runtime.Destroy(container) - defer container.stdin.Close() + // FIXME @shykes: this seems redundant, but is very old, I'm leaving it in case + // there is a side effect I'm not seeing. + // defer container.stdin.Close() if container.State.Running { t.Errorf("Container shouldn't be running") @@ -490,22 +481,35 @@ func TestKillDifferentUser(t *testing.T) { // Test that creating a container with a volume doesn't crash. Regression test for #995. 
func TestCreateVolume(t *testing.T) { - runtime := mkRuntime(t) + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, hc, _, err := ParseRun([]string{"-v", "/var/lib/data", GetTestImage(runtime).ID, "echo", "hello", "world"}, nil) + config, hc, _, err := docker.ParseRun([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil) if err != nil { t.Fatal(err) } - c, _, err := runtime.Create(config, "") - if err != nil { + jobCreate := eng.Job("create") + if err := jobCreate.ImportEnv(config); err != nil { t.Fatal(err) } - defer runtime.Destroy(c) - c.hostConfig = hc - if err := c.Start(); err != nil { + var id string + jobCreate.StdoutParseString(&id) + if err := jobCreate.Run(); err != nil { t.Fatal(err) } + jobStart := eng.Job("start", id) + if err := jobStart.ImportEnv(hc); err != nil { + t.Fatal(err) + } + if err := jobStart.Run(); err != nil { + t.Fatal(err) + } + // FIXME: this hack can be removed once Wait is a job + c := runtime.Get(id) + if c == nil { + t.Fatalf("Couldn't retrieve container %s from runtime", id) + } c.WaitTimeout(500 * time.Millisecond) c.Wait() } @@ -513,7 +517,7 @@ func TestCreateVolume(t *testing.T) { func TestKill(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, @@ -557,7 +561,7 @@ func TestExitCode(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - trueContainer, _, err := runtime.Create(&Config{ + trueContainer, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/true", ""}, }, "") @@ -572,7 +576,7 @@ func TestExitCode(t *testing.T) { t.Errorf("Unexpected exit code %d (expected 0)", trueContainer.State.ExitCode) } - falseContainer, _, err := runtime.Create(&Config{ + falseContainer, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/false", ""}, }, "") @@ -591,7 +595,7 @@ func TestExitCode(t *testing.T) { func TestRestart(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, }, @@ -622,7 +626,7 @@ func TestRestart(t *testing.T) { func TestRestartStdin(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, @@ -700,7 +704,7 @@ func TestUser(t *testing.T) { defer nuke(runtime) // Default user must be root - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, }, @@ -719,7 +723,7 @@ func TestUser(t *testing.T) { } // Set a username - container, _, err = runtime.Create(&Config{ + container, _, err = runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -740,7 +744,7 @@ func TestUser(t *testing.T) { } // Set a UID - container, _, err = runtime.Create(&Config{ + container, _, err = runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -761,7 +765,7 @@ func TestUser(t *testing.T) { } // Set a different user by uid - container, _, err = runtime.Create(&Config{ + container, _, err = runtime.Create(&docker.Config{ Image: 
GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -784,7 +788,7 @@ func TestUser(t *testing.T) { } // Set a different user by username - container, _, err = runtime.Create(&Config{ + container, _, err = runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -805,7 +809,7 @@ func TestUser(t *testing.T) { } // Test an wrong username - container, _, err = runtime.Create(&Config{ + container, _, err = runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, @@ -827,7 +831,7 @@ func TestMultipleContainers(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container1, _, err := runtime.Create(&Config{ + container1, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, @@ -838,7 +842,7 @@ func TestMultipleContainers(t *testing.T) { } defer runtime.Destroy(container1) - container2, _, err := runtime.Create(&Config{ + container2, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, @@ -882,7 +886,7 @@ func TestMultipleContainers(t *testing.T) { func TestStdin(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, @@ -927,7 +931,7 @@ func TestStdin(t *testing.T) { func TestTty(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, @@ -974,7 +978,7 @@ func TestEnv(t *testing.T) { os.Setenv("TRICKY", "tri\ncky\n") runtime := mkRuntime(t) defer nuke(runtime) - config, _, _, err := ParseRun([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil) + config, _, _, err := docker.ParseRun([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil) if err != nil { t.Fatal(err) } @@ -1028,7 +1032,7 @@ func TestEntrypoint(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Entrypoint: []string{"/bin/echo"}, Cmd: []string{"-n", "foobar"}, @@ -1052,7 +1056,7 @@ func TestEntrypointNoCmd(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Entrypoint: []string{"/bin/echo", "foobar"}, }, @@ -1071,96 +1075,11 @@ func TestEntrypointNoCmd(t *testing.T) { } } -func grepFile(t *testing.T, path string, pattern string) { - f, err := os.Open(path) - if err != nil { - t.Fatal(err) - } - defer f.Close() - r := bufio.NewReader(f) - var ( - line string - ) - err = nil - for err == nil { - line, err = r.ReadString('\n') - if strings.Contains(line, pattern) == true { - return - } - } - t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path) -} - -func TestLXCConfig(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - // Memory is allocated randomly for testing - rand.Seed(time.Now().UTC().UnixNano()) - memMin := 33554432 - memMax := 536870912 - mem := memMin + rand.Intn(memMax-memMin) - // CPU shares as well - cpuMin := 100 - cpuMax := 10000 - cpu := cpuMin + rand.Intn(cpuMax-cpuMin) - container, _, err := runtime.Create(&Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/true"}, - - Hostname: "foobar", - Memory: int64(mem), - 
CpuShares: int64(cpu), - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - container.generateLXCConfig() - grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar") - grepFile(t, container.lxcConfigPath(), - fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem)) - grepFile(t, container.lxcConfigPath(), - fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2)) -} - -func TestCustomLxcConfig(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create(&Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/true"}, - - Hostname: "foobar", - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - container.hostConfig = &HostConfig{LxcConf: []KeyValuePair{ - { - Key: "lxc.utsname", - Value: "docker", - }, - { - Key: "lxc.cgroup.cpuset.cpus", - Value: "0,1", - }, - }} - - container.generateLXCConfig() - grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker") - grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1") -} - func BenchmarkRunSequencial(b *testing.B) { runtime := mkRuntime(b) defer nuke(runtime) for i := 0; i < b.N; i++ { - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foo"}, }, @@ -1193,7 +1112,7 @@ func BenchmarkRunParallel(b *testing.B) { complete := make(chan error) tasks = append(tasks, complete) go func(i int, complete chan error) { - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foo"}, }, @@ -1244,11 +1163,12 @@ func tempDir(t *testing.T) string { // Test for #1737 func TestCopyVolumeUidGid(t *testing.T) { - r := mkRuntime(t) - defer nuke(r) + eng := NewTestEngine(t) + r := mkRuntimeFromEngine(eng, t) + defer r.Nuke() // Add directory not owned by root - container1, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t) + container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t) defer r.Destroy(container1) if container1.State.Running { @@ -1261,11 +1181,7 @@ func TestCopyVolumeUidGid(t *testing.T) { t.Errorf("Container shouldn't be running") } - rwTar, err := container1.ExportRw() - if err != nil { - t.Error(err) - } - img, err := r.graph.Create(rwTar, container1, "unit test commited image", "", nil) + img, err := r.Commit(container1, "", "", "unit test commited image", "", nil) if err != nil { t.Error(err) } @@ -1273,7 +1189,7 @@ func TestCopyVolumeUidGid(t *testing.T) { // Test that the uid and gid is copied from the image to the volume tmpDir1 := tempDir(t) defer os.RemoveAll(tmpDir1) - stdout1, _ := runContainer(r, []string{"-v", "/hello", img.ID, "stat", "-c", "%U %G", "/hello"}, t) + stdout1, _ := runContainer(eng, r, []string{"-v", "/hello", img.ID, "stat", "-c", "%U %G", "/hello"}, t) if !strings.Contains(stdout1, "daemon daemon") { t.Fatal("Container failed to transfer uid and gid to volume") } @@ -1281,11 +1197,12 @@ func TestCopyVolumeUidGid(t *testing.T) { // Test for #1582 func TestCopyVolumeContent(t *testing.T) { - r := mkRuntime(t) - defer nuke(r) + eng := NewTestEngine(t) + r := mkRuntimeFromEngine(eng, t) + defer r.Nuke() // Put some content in a directory of a container and commit it - container1, _ := mkContainer(r, 
[]string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t) + container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t) defer r.Destroy(container1) if container1.State.Running { @@ -1298,11 +1215,7 @@ func TestCopyVolumeContent(t *testing.T) { t.Errorf("Container shouldn't be running") } - rwTar, err := container1.ExportRw() - if err != nil { - t.Error(err) - } - img, err := r.graph.Create(rwTar, container1, "unit test commited image", "", nil) + img, err := r.Commit(container1, "", "", "unit test commited image", "", nil) if err != nil { t.Error(err) } @@ -1310,31 +1223,33 @@ func TestCopyVolumeContent(t *testing.T) { // Test that the content is copied from the image to the volume tmpDir1 := tempDir(t) defer os.RemoveAll(tmpDir1) - stdout1, _ := runContainer(r, []string{"-v", "/hello", img.ID, "find", "/hello"}, t) + stdout1, _ := runContainer(eng, r, []string{"-v", "/hello", img.ID, "find", "/hello"}, t) if !(strings.Contains(stdout1, "/hello/local/world") && strings.Contains(stdout1, "/hello/local")) { t.Fatal("Container failed to transfer content to volume") } } func TestBindMounts(t *testing.T) { - r := mkRuntime(t) - defer nuke(r) + eng := NewTestEngine(t) + r := mkRuntimeFromEngine(eng, t) + defer r.Nuke() + tmpDir := tempDir(t) defer os.RemoveAll(tmpDir) writeFile(path.Join(tmpDir, "touch-me"), "", t) // Test reading from a read-only bind mount - stdout, _ := runContainer(r, []string{"-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "_", "ls", "/tmp"}, t) + stdout, _ := runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "_", "ls", "/tmp"}, t) if !strings.Contains(stdout, "touch-me") { t.Fatal("Container failed to read from bind mount") } // test writing to bind mount - runContainer(r, []string{"-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "_", "touch", "/tmp/holla"}, t) + runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "_", "touch", "/tmp/holla"}, t) readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist // test mounting to an illegal destination directory - if _, err := runContainer(r, []string{"-v", fmt.Sprintf("%s:.", tmpDir), "_", "ls", "."}, nil); err == nil { + if _, err := runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:.", tmpDir), "_", "ls", "."}, nil); err == nil { t.Fatal("Container bind mounted illegal directory") } } @@ -1344,7 +1259,7 @@ func TestFromVolumesInReadonlyMode(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1364,7 +1279,7 @@ func TestFromVolumesInReadonlyMode(t *testing.T) { } container2, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: container.ID + ":ro", @@ -1405,7 +1320,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1425,7 +1340,7 @@ func TestVolumesFromReadonlyMount(t *testing.T) { } container2, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: container.ID, @@ 
-1461,7 +1376,7 @@ func TestRestartWithVolumes(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1505,7 +1420,7 @@ func TestVolumesFromWithVolumes(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1534,7 +1449,7 @@ func TestVolumesFromWithVolumes(t *testing.T) { } container2, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat", "/test/foo"}, VolumesFrom: container.ID, @@ -1568,26 +1483,42 @@ func TestVolumesFromWithVolumes(t *testing.T) { } func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) { - runtime := mkRuntime(t) + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, hc, _, err := ParseRun([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil) + config, hc, _, err := docker.ParseRun([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil) if err != nil { t.Fatal(err) } - c, _, err := runtime.Create(config, "") - if err != nil { + + jobCreate := eng.Job("create") + if err := jobCreate.ImportEnv(config); err != nil { t.Fatal(err) } + var id string + jobCreate.StdoutParseString(&id) + if err := jobCreate.Run(); err != nil { + t.Fatal(err) + } + // FIXME: this hack can be removed once Wait is a job + c := runtime.Get(id) + if c == nil { + t.Fatalf("Couldn't retrieve container %s from runtime", id) + } stdout, err := c.StdoutPipe() if err != nil { t.Fatal(err) } - defer runtime.Destroy(c) - c.hostConfig = hc - if err := c.Start(); err != nil { + + jobStart := eng.Job("start", id) + if err := jobStart.ImportEnv(hc); err != nil { t.Fatal(err) } + if err := jobStart.Run(); err != nil { + t.Fatal(err) + } + c.WaitTimeout(500 * time.Millisecond) c.Wait() output, err := ioutil.ReadAll(stdout) @@ -1602,37 +1533,40 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) { if !strings.HasSuffix(interfaces[0], ": lo") { t.Fatalf("Wrong interface in test container: expected [*: lo], got %s", interfaces) } - } func TestPrivilegedCanMknod(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - if output, _ := runContainer(runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" { + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) + defer runtime.Nuke() + if output, _ := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" { t.Fatal("Could not mknod into privileged container") } } func TestPrivilegedCanMount(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - if output, _ := runContainer(runtime, []string{"-privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" { + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) + defer runtime.Nuke() + if output, _ := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" { t.Fatal("Could not mount into privileged container") } } func 
TestPrivilegedCannotMknod(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - if output, _ := runContainer(runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 || echo ok"}, t); output != "ok\n" { + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) + defer runtime.Nuke() + if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 || echo ok"}, t); output != "ok\n" { t.Fatal("Could mknod into secure container") } } func TestPrivilegedCannotMount(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - if output, _ := runContainer(runtime, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" { + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) + defer runtime.Nuke() + if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" { t.Fatal("Could mount into secure container") } } @@ -1641,7 +1575,7 @@ func TestMultipleVolumesFrom(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1670,7 +1604,7 @@ func TestMultipleVolumesFrom(t *testing.T) { } container2, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /other/foo"}, Volumes: map[string]struct{}{"/other": {}}, @@ -1692,7 +1626,7 @@ func TestMultipleVolumesFrom(t *testing.T) { } container3, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: strings.Join([]string{container.ID, container2.ID}, ","), @@ -1720,7 +1654,7 @@ func TestRestartGhost(t *testing.T) { defer nuke(runtime) container, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Volumes: map[string]struct{}{"/test": {}}, @@ -1742,67 +1676,3 @@ func TestRestartGhost(t *testing.T) { t.Fatal(err) } } - -func TestRemoveFile(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "touch test.txt"}, t) - defer runtime.Destroy(container1) - - if container1.State.Running { - t.Errorf("Container shouldn't be running") - } - if err := container1.Run(); err != nil { - t.Fatal(err) - } - if container1.State.Running { - t.Errorf("Container shouldn't be running") - } - - commit := func(container *Container) (*Image, error) { - rwTar, err := container.ExportRw() - if err != nil { - return nil, err - } - img, err := runtime.graph.Create(rwTar, container, "unit test commited image", "", nil) - if err != nil { - return nil, err - } - return img, nil - } - - img, err := commit(container1) - if err != nil { - t.Fatal(err) - } - - container2, _ := mkContainer(runtime, []string{img.ID, "/bin/sh", "-c", "rm /test.txt"}, t) - defer runtime.Destroy(container2) - - if err := container2.Run(); err != nil { - t.Fatal(err) - } - - containerMount, err := runtime.driver.Get(container2.ID) - if err != nil { - t.Fatal(err) - } - if _, err := os.Stat(path.Join(containerMount, "test.txt")); err == nil { - t.Fatalf("test.txt should not exist") - } - - img, err = commit(container2) - if err != nil { - t.Fatal(err) - } - - mountPoint, err := 
runtime.driver.Get(img.ID) - if err != nil { - t.Fatal(err) - } - file := path.Join(mountPoint, "test.txt") - if _, err := os.Stat(file); err == nil { - t.Fatalf("The file %s should not exist\n", file) - } -} diff --git a/integration/graph_test.go b/integration/graph_test.go new file mode 100644 index 0000000000..8c517255ba --- /dev/null +++ b/integration/graph_test.go @@ -0,0 +1,59 @@ +package docker + +import ( + "github.com/dotcloud/docker" + "github.com/dotcloud/docker/graphdriver" + "io/ioutil" + "os" + "path" + "testing" +) + +func TestMount(t *testing.T) { + graph, driver := tempGraph(t) + defer os.RemoveAll(graph.Root) + defer driver.Cleanup() + + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + image, err := graph.Create(archive, nil, "Testing", "", nil) + if err != nil { + t.Fatal(err) + } + tmp, err := ioutil.TempDir("", "docker-test-graph-mount-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + rootfs := path.Join(tmp, "rootfs") + if err := os.MkdirAll(rootfs, 0700); err != nil { + t.Fatal(err) + } + rw := path.Join(tmp, "rw") + if err := os.MkdirAll(rw, 0700); err != nil { + t.Fatal(err) + } + + if _, err := driver.Get(image.ID); err != nil { + t.Fatal(err) + } +} + +//FIXME: duplicate +func tempGraph(t *testing.T) (*docker.Graph, graphdriver.Driver) { + tmp, err := ioutil.TempDir("", "docker-graph-") + if err != nil { + t.Fatal(err) + } + driver, err := graphdriver.New(tmp) + if err != nil { + t.Fatal(err) + } + graph, err := docker.NewGraph(tmp, driver) + if err != nil { + t.Fatal(err) + } + return graph, driver +} diff --git a/integration/iptables_test.go b/integration/iptables_test.go new file mode 100644 index 0000000000..060d0fe074 --- /dev/null +++ b/integration/iptables_test.go @@ -0,0 +1,22 @@ +package docker + +import ( + "github.com/dotcloud/docker/iptables" + "os" + "testing" +) + +// FIXME: this test should be a unit test. +// For example by mocking os/exec to make sure iptables is not actually called. + +func TestIptables(t *testing.T) { + if _, err := iptables.Raw("-L"); err != nil { + t.Fatal(err) + } + path := os.Getenv("PATH") + os.Setenv("PATH", "") + defer os.Setenv("PATH", path) + if _, err := iptables.Raw("-L"); err == nil { + t.Fatal("Not finding iptables in the PATH should cause an error") + } +} diff --git a/runtime_test.go b/integration/runtime_test.go similarity index 73% rename from runtime_test.go rename to integration/runtime_test.go index 390141f8ab..320a3645b0 100644 --- a/runtime_test.go +++ b/integration/runtime_test.go @@ -3,6 +3,7 @@ package docker import ( "bytes" "fmt" + "github.com/dotcloud/docker" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/sysinit" "github.com/dotcloud/docker/utils" @@ -15,7 +16,6 @@ import ( "runtime" "strconv" "strings" - "sync" "syscall" "testing" "time" @@ -32,42 +32,33 @@ const ( ) var ( - globalRuntime *Runtime + // FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted. 
+ globalRuntime *docker.Runtime + globalEngine *engine.Engine startFds int startGoroutines int ) -func nuke(runtime *Runtime) error { - if nonuke := os.Getenv("NONUKE"); nonuke != "" { - return nil - } - var wg sync.WaitGroup - for _, container := range runtime.List() { - wg.Add(1) - go func(c *Container) { - c.Kill() - wg.Done() - }(container) - } - wg.Wait() - runtime.Close() - - os.Remove(filepath.Join(runtime.config.Root, "linkgraph.db")) - return os.RemoveAll(runtime.config.Root) +// FIXME: nuke() is deprecated by Runtime.Nuke() +func nuke(runtime *docker.Runtime) error { + return runtime.Nuke() } -func cleanup(runtime *Runtime) error { +// FIXME: cleanup and nuke are redundant. +func cleanup(eng *engine.Engine, t *testing.T) error { + runtime := mkRuntimeFromEngine(eng, t) for _, container := range runtime.List() { container.Kill() runtime.Destroy(container) } - images, err := runtime.graph.Map() + srv := mkServerFromEngine(eng, t) + images, err := srv.Images(true, "") if err != nil { return err } for _, image := range images { if image.ID != unitTestImageID { - runtime.graph.Delete(image.ID) + srv.ImageDelete(image.ID, false) } } return nil @@ -136,10 +127,9 @@ func setupBaseImage() { log.Fatalf("Unable to create a runtime for tests:", err) } srv := mkServerFromEngine(eng, log.New(os.Stderr, "", 0)) - runtime := srv.runtime // If the unit test is not found, try to download it. - if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID { + if img, err := srv.ImageInspect(unitTestImageName); err != nil || img.ID != unitTestImageID { // Retrieve the Image if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil { log.Fatalf("Unable to pull the test image: %s", err) @@ -154,8 +144,8 @@ func spawnGlobalDaemon() { } t := log.New(os.Stderr, "", 0) eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - globalRuntime = srv.runtime + globalEngine = eng + globalRuntime = mkRuntimeFromEngine(eng, t) // Spawn a Daemon go func() { @@ -177,8 +167,8 @@ func spawnGlobalDaemon() { // FIXME: test that ImagePull(json=true) send correct json output -func GetTestImage(runtime *Runtime) *Image { - imgs, err := runtime.graph.Map() +func GetTestImage(runtime *docker.Runtime) *docker.Image { + imgs, err := runtime.Graph().Map() if err != nil { log.Fatalf("Unable to get the test image:", err) } @@ -187,7 +177,7 @@ func GetTestImage(runtime *Runtime) *Image { return image } } - log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.graph.Root, imgs) + log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.Graph().Root, imgs) return nil } @@ -200,7 +190,7 @@ func TestRuntimeCreate(t *testing.T) { t.Errorf("Expected 0 containers, %v found", len(runtime.List())) } - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, }, @@ -241,13 +231,25 @@ func TestRuntimeCreate(t *testing.T) { t.Errorf("Exists() returned false for a newly created container") } + // Test that conflict error displays correct details + testContainer, _, _ := runtime.Create( + &docker.Config{ + Image: GetTestImage(runtime).ID, + Cmd: []string{"ls", "-al"}, + }, + "conflictname", + ) + if _, _, err := runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil || !strings.Contains(err.Error(), 
utils.TruncateID(testContainer.ID)) { + t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %s", err.Error()) + } + // Make sure create with bad parameters returns an error - if _, _, err = runtime.Create(&Config{Image: GetTestImage(runtime).ID}, ""); err == nil { + if _, _, err = runtime.Create(&docker.Config{Image: GetTestImage(runtime).ID}, ""); err == nil { t.Fatal("Builder.Create should throw an error when Cmd is missing") } if _, _, err := runtime.Create( - &Config{ + &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{}, }, @@ -256,7 +258,7 @@ func TestRuntimeCreate(t *testing.T) { t.Fatal("Builder.Create should throw an error when Cmd is empty") } - config := &Config{ + config := &docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/ls"}, PortSpecs: []string{"80"}, @@ -269,7 +271,7 @@ func TestRuntimeCreate(t *testing.T) { } // test expose 80:8000 - container, warnings, err := runtime.Create(&Config{ + container, warnings, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, PortSpecs: []string{"80:8000"}, @@ -288,7 +290,7 @@ func TestDestroy(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container, _, err := runtime.Create(&Config{ + container, _, err := runtime.Create(&docker.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, }, "") @@ -315,12 +317,6 @@ func TestDestroy(t *testing.T) { t.Errorf("Unable to get newly created container") } - // Make sure the container root directory does not exist anymore - _, err = os.Stat(container.root) - if err == nil || !os.IsNotExist(err) { - t.Errorf("Container root directory still exists after destroy") - } - // Test double destroy if err := runtime.Destroy(container); err == nil { // It should have failed @@ -332,13 +328,13 @@ func TestGet(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) - container1, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) + container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) defer runtime.Destroy(container1) - container2, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) + container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) defer runtime.Destroy(container2) - container3, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) + container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) defer runtime.Destroy(container3) if runtime.Get(container1.ID) != container1 { @@ -355,15 +351,21 @@ func TestGet(t *testing.T) { } -func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, string) { +func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *docker.Container, string) { var ( - err error - container *Container - strPort string - runtime = mkRuntime(t) - port = 5554 - p Port + err error + id string + strPort string + eng = NewTestEngine(t) + runtime = mkRuntimeFromEngine(eng, t) + port = 5554 + p docker.Port ) + defer func() { + if err != nil { + runtime.Nuke() + } + }() for { port += 1 @@ -376,37 +378,45 @@ func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, } else { t.Fatal(fmt.Errorf("Unknown protocol %v", proto)) } - ep := make(map[Port]struct{}, 1) - p = Port(fmt.Sprintf("%s/%s", strPort, proto)) + ep := make(map[docker.Port]struct{}, 1) + p = docker.Port(fmt.Sprintf("%s/%s", strPort, proto)) ep[p] = struct{}{} - container, _, err = runtime.Create(&Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"sh", "-c", cmd}, - 
PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)}, - ExposedPorts: ep, - }, "") - if err != nil { - nuke(runtime) + jobCreate := eng.Job("create") + jobCreate.Setenv("Image", unitTestImageID) + jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd}) + jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)}) + jobCreate.SetenvJson("ExposedPorts", ep) + jobCreate.StdoutParseString(&id) + if err := jobCreate.Run(); err != nil { t.Fatal(err) } - - if container != nil { + // FIXME: this relies on the undocumented behavior of runtime.Create + // which will return a nil error AND container if the exposed ports + // are invalid. That behavior should be fixed! + if id != "" { break } t.Logf("Port %v already in use, trying another one", strPort) + } - container.hostConfig = &HostConfig{ - PortBindings: make(map[Port][]PortBinding), - } - container.hostConfig.PortBindings[p] = []PortBinding{ + jobStart := eng.Job("start", id) + portBindings := make(map[docker.Port][]docker.PortBinding) + portBindings[p] = []docker.PortBinding{ {}, } - if err := container.Start(); err != nil { - nuke(runtime) + if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil { t.Fatal(err) } + if err := jobStart.Run(); err != nil { + t.Fatal(err) + } + + container := runtime.Get(id) + if container == nil { + t.Fatalf("Couldn't fetch test container %s", id) + } setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() { for !container.State.Running { @@ -507,14 +517,15 @@ func TestAllocateUDPPortLocalhost(t *testing.T) { } func TestRestore(t *testing.T) { - runtime1 := mkRuntime(t) - defer nuke(runtime1) + eng := NewTestEngine(t) + runtime1 := mkRuntimeFromEngine(eng, t) + defer runtime1.Nuke() // Create a container with one instance of docker - container1, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t) + container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t) defer runtime1.Destroy(container1) // Create a second container meant to be killed - container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t) + container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t) defer runtime1.Destroy(container2) // Start the container non blocking @@ -548,12 +559,19 @@ func TestRestore(t *testing.T) { // Here are are simulating a docker restart - that is, reloading all containers // from scratch - runtime1.config.AutoRestart = false - runtime2, err := NewRuntimeFromDirectory(runtime1.config) + root := eng.Root() + eng, err := engine.New(root) if err != nil { t.Fatal(err) } - defer nuke(runtime2) + job := eng.Job("initapi") + job.Setenv("Root", eng.Root()) + job.SetenvBool("Autorestart", false) + if err := job.Run(); err != nil { + t.Fatal(err) + } + + runtime2 := mkRuntimeFromEngine(eng, t) if len(runtime2.List()) != 2 { t.Errorf("Expected 2 container, %v found", len(runtime2.List())) } @@ -578,14 +596,31 @@ func TestRestore(t *testing.T) { } func TestReloadContainerLinks(t *testing.T) { - runtime1 := mkRuntime(t) + // FIXME: here we don't use NewTestEngine because it calls initapi with Autorestart=false, + // and we want to set it to true. 
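// Illustrative sketch, not part of the patch: stripped of assertions and
// retries, the engine-job flow that these conversions substitute for the old
// runtime.Create()/container.Start() calls boils down to the following. It
// assumes the helpers added in integration/utils_test.go (NewTestEngine, which
// creates a fresh root and runs the "initapi" job, and mkRuntimeFromEngine)
// plus a *testing.T named t in scope.
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()

config, hc, _, err := docker.ParseRun([]string{"-i", unitTestImageID, "/bin/cat"}, nil)
if err != nil {
	t.Fatal(err)
}

var id string
jobCreate := eng.Job("create")
if err := jobCreate.ImportEnv(config); err != nil {
	t.Fatal(err)
}
jobCreate.StdoutParseString(&id) // the new container ID is written to the job's stdout
if err := jobCreate.Run(); err != nil {
	t.Fatal(err)
}

jobStart := eng.Job("start", id)
if err := jobStart.ImportEnv(hc); err != nil { // the host config travels as job environment
	t.Fatal(err)
}
if err := jobStart.Run(); err != nil {
	t.Fatal(err)
}

// Wait and Kill are not jobs yet, so tests still reach into the runtime directly.
if c := runtime.Get(id); c != nil {
	c.Wait()
}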
+ root, err := newTestDirectory(unitTestStoreBase) + if err != nil { + t.Fatal(err) + } + eng, err := engine.New(root) + if err != nil { + t.Fatal(err) + } + job := eng.Job("initapi") + job.Setenv("Root", eng.Root()) + job.SetenvBool("Autorestart", true) + if err := job.Run(); err != nil { + t.Fatal(err) + } + + runtime1 := mkRuntimeFromEngine(eng, t) defer nuke(runtime1) // Create a container with one instance of docker - container1, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t) + container1, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t) defer runtime1.Destroy(container1) // Create a second container meant to be killed - container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t) + container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t) defer runtime1.Destroy(container2) // Start the container non blocking @@ -593,7 +628,9 @@ func TestReloadContainerLinks(t *testing.T) { t.Fatal(err) } // Add a link to container 2 - container1.hostConfig.Links = []string{"/" + container2.ID + ":first"} + // FIXME @shykes: setting hostConfig.Links seems redundant with calling RegisterLink(). + // Why do we need it @crosbymichael? + // container1.hostConfig.Links = []string{"/" + container2.ID + ":first"} if err := runtime1.RegisterLink(container1, container2, "first"); err != nil { t.Fatal(err) } @@ -615,12 +652,18 @@ func TestReloadContainerLinks(t *testing.T) { // Here are are simulating a docker restart - that is, reloading all containers // from scratch - runtime1.config.AutoRestart = true - runtime2, err := NewRuntimeFromDirectory(runtime1.config) + eng, err = engine.New(root) if err != nil { t.Fatal(err) } - defer nuke(runtime2) + job = eng.Job("initapi") + job.Setenv("Root", eng.Root()) + job.SetenvBool("Autorestart", false) + if err := job.Run(); err != nil { + t.Fatal(err) + } + + runtime2 := mkRuntimeFromEngine(eng, t) if len(runtime2.List()) != 2 { t.Errorf("Expected 2 container, %v found", len(runtime2.List())) } @@ -634,27 +677,32 @@ func TestReloadContainerLinks(t *testing.T) { t.Fatalf("Expected 2 container alive, %d found", runningCount) } + // FIXME: we no longer test if containers were registered in the right order, + // because there is no public // Make sure container 2 ( the child of container 1 ) was registered and started first // with the runtime - first := runtime2.containers.Front() - if first.Value.(*Container).ID != container2.ID { + // + containers := runtime2.List() + if len(containers) == 0 { + t.Fatalf("Runtime has no containers") + } + first := containers[0] + if first.ID != container2.ID { t.Fatalf("Container 2 %s should be registered first in the runtime", container2.ID) } // Verify that the link is still registered in the runtime - entity := runtime2.containerGraph.Get(container1.Name) - if entity == nil { - t.Fatal("Entity should not be nil") + if c := runtime2.Get(container1.Name); c == nil { + t.Fatal("Named container is no longer registered after restart") } } func TestDefaultContainerName(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - runtime := srv.runtime + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -666,29 +714,19 @@ func TestDefaultContainerName(t *testing.T) { t.Fatalf("Expect /some_name got %s", container.Name) } - paths := 
runtime.containerGraph.RefPaths(containerID) - if paths == nil || len(paths) == 0 { - t.Fatalf("Could not find edges for %s", containerID) - } - edge := paths[0] - if edge.ParentID != "0" { - t.Fatalf("Expected engine got %s", edge.ParentID) - } - if edge.EntityID != containerID { - t.Fatalf("Expected %s got %s", containerID, edge.EntityID) - } - if edge.Name != "some_name" { - t.Fatalf("Expected some_name got %s", edge.Name) + if c := runtime.Get("/some_name"); c == nil { + t.Fatalf("Couldn't retrieve test container as /some_name") + } else if c.ID != containerID { + t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID) } } func TestRandomContainerName(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - runtime := srv.runtime + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err := docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -700,29 +738,19 @@ func TestRandomContainerName(t *testing.T) { t.Fatalf("Expected not empty container name") } - paths := runtime.containerGraph.RefPaths(containerID) - if paths == nil || len(paths) == 0 { - t.Fatalf("Could not find edges for %s", containerID) - } - edge := paths[0] - if edge.ParentID != "0" { - t.Fatalf("Expected engine got %s", edge.ParentID) - } - if edge.EntityID != containerID { - t.Fatalf("Expected %s got %s", containerID, edge.EntityID) - } - if edge.Name == "" { - t.Fatalf("Expected not empty container name") + if c := runtime.Get(container.Name); c == nil { + log.Fatalf("Could not lookup container %s by its name", container.Name) + } else if c.ID != containerID { + log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID) } } func TestLinkChildContainer(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - runtime := srv.runtime + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -738,7 +766,7 @@ func TestLinkChildContainer(t *testing.T) { t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } - config, _, _, err = ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err = docker.ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -761,11 +789,10 @@ func TestLinkChildContainer(t *testing.T) { func TestGetAllChildren(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - runtime := srv.runtime + runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -781,7 +808,7 @@ func TestGetAllChildren(t *testing.T) { t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } - config, _, _, err = ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err = docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -813,19 +840,3 @@ func TestGetAllChildren(t *testing.T) { } } } - -func TestGetFullName(t *testing.T) { - runtime := mkRuntime(t) 
- defer nuke(runtime) - - name, err := runtime.getFullName("testing") - if err != nil { - t.Fatal(err) - } - if name != "/testing" { - t.Fatalf("Expected /testing got %s", name) - } - if _, err := runtime.getFullName(""); err == nil { - t.Fatal("Error should not be nil") - } -} diff --git a/server_test.go b/integration/server_test.go similarity index 52% rename from server_test.go rename to integration/server_test.go index 1ab38422f5..6c61bedafb 100644 --- a/server_test.go +++ b/integration/server_test.go @@ -1,32 +1,31 @@ package docker import ( + "github.com/dotcloud/docker" "github.com/dotcloud/docker/utils" "io/ioutil" "strings" "testing" - "time" ) func TestContainerTagImageDelete(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() - srv := &Server{runtime: runtime} + srv := mkServerFromEngine(eng, t) initialImages, err := srv.Images(false, "") if err != nil { t.Fatal(err) } - - if err := srv.runtime.repositories.Set("utest", "tag1", unitTestImageName, false); err != nil { + if err := srv.ContainerTag(unitTestImageName, "utest", "tag1", false); err != nil { t.Fatal(err) } - if err := srv.runtime.repositories.Set("utest/docker", "tag2", unitTestImageName, false); err != nil { + if err := srv.ContainerTag(unitTestImageName, "utest/docker", "tag2", false); err != nil { t.Fatal(err) } - if err := srv.runtime.repositories.Set("utest:5000/docker", "tag3", unitTestImageName, false); err != nil { + if err := srv.ContainerTag(unitTestImageName, "utest:5000/docker", "tag3", false); err != nil { t.Fatal(err) } @@ -82,46 +81,43 @@ func TestContainerTagImageDelete(t *testing.T) { func TestCreateRm(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) + defer mkRuntimeFromEngine(eng, t).Nuke() - config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, _, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) - if len(runtime.List()) != 1 { - t.Errorf("Expected 1 container, %v found", len(runtime.List())) + if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 { + t.Errorf("Expected 1 container, %v found", len(c)) } if err = srv.ContainerDestroy(id, true, false); err != nil { t.Fatal(err) } - if len(runtime.List()) != 0 { - t.Errorf("Expected 0 container, %v found", len(runtime.List())) + if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 { + t.Errorf("Expected 0 container, %v found", len(c)) } } func TestCreateRmVolumes(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) + defer mkRuntimeFromEngine(eng, t).Nuke() - config, hostConfig, _, err := ParseRun([]string{"-v", "/srv", GetTestImage(runtime).ID, "echo test"}, nil) + config, hostConfig, _, err := docker.ParseRun([]string{"-v", "/srv", unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) - if len(runtime.List()) != 1 { - t.Errorf("Expected 1 container, %v found", len(runtime.List())) + if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 { + t.Errorf("Expected 1 container, %v found", len(c)) } job := eng.Job("start", id) @@ -141,18 +137,17 @@ func TestCreateRmVolumes(t *testing.T) { t.Fatal(err) } - if len(runtime.List()) != 0 { - t.Errorf("Expected 0 container, %v found", len(runtime.List())) + if c := srv.Containers(true, 
false, -1, "", ""); len(c) != 0 { + t.Errorf("Expected 0 container, %v found", len(c)) } } func TestCommit(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) + defer mkRuntimeFromEngine(eng, t).Nuke() - config, _, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil) + config, _, _, err := docker.ParseRun([]string{unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } @@ -167,18 +162,17 @@ func TestCommit(t *testing.T) { func TestCreateStartRestartStopStartKillRm(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) + defer mkRuntimeFromEngine(eng, t).Nuke() - config, hostConfig, _, err := ParseRun([]string{GetTestImage(runtime).ID, "/bin/cat"}, nil) + config, hostConfig, _, err := docker.ParseRun([]string{unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) - if len(runtime.List()) != 1 { - t.Errorf("Expected 1 container, %v found", len(runtime.List())) + if c := srv.Containers(true, false, -1, "", ""); len(c) != 1 { + t.Errorf("Expected 1 container, %v found", len(c)) } job := eng.Job("start", id) @@ -214,21 +208,18 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) { t.Fatal(err) } - if len(runtime.List()) != 0 { - t.Errorf("Expected 0 container, %v found", len(runtime.List())) + if c := srv.Containers(true, false, -1, "", ""); len(c) != 0 { + t.Errorf("Expected 0 container, %v found", len(c)) } - } func TestRunWithTooLowMemoryLimit(t *testing.T) { eng := NewTestEngine(t) - srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) + defer mkRuntimeFromEngine(eng, t).Nuke() // Try to create a container with a memory limit of 1 byte less than the minimum allowed limit. job := eng.Job("create") - job.Setenv("Image", GetTestImage(runtime).ID) + job.Setenv("Image", unitTestImageID) job.Setenv("Memory", "524287") job.Setenv("CpuShares", "1000") job.SetenvList("Cmd", []string{"/bin/cat"}) @@ -239,163 +230,17 @@ func TestRunWithTooLowMemoryLimit(t *testing.T) { } } -func TestContainerTop(t *testing.T) { - t.Skip("Fixme. Skipping test for now. 
Reported error: 'server_test.go:236: Expected 2 processes, found 1.'") - - runtime := mkRuntime(t) - defer nuke(runtime) - - srv := &Server{runtime: runtime} - - c, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t) - c, err := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t) - if err != nil { - t.Fatal(err) - } - - defer runtime.Destroy(c) - if err := c.Start(); err != nil { - t.Fatal(err) - } - - // Give some time to the process to start - c.WaitTimeout(500 * time.Millisecond) - - if !c.State.Running { - t.Errorf("Container should be running") - } - procs, err := srv.ContainerTop(c.ID, "") - if err != nil { - t.Fatal(err) - } - - if len(procs.Processes) != 2 { - t.Fatalf("Expected 2 processes, found %d.", len(procs.Processes)) - } - - pos := -1 - for i := 0; i < len(procs.Titles); i++ { - if procs.Titles[i] == "CMD" { - pos = i - break - } - } - - if pos == -1 { - t.Fatalf("Expected CMD, not found.") - } - - if procs.Processes[0][pos] != "sh" && procs.Processes[0][pos] != "busybox" { - t.Fatalf("Expected `busybox` or `sh`, found %s.", procs.Processes[0][pos]) - } - - if procs.Processes[1][pos] != "sh" && procs.Processes[1][pos] != "busybox" { - t.Fatalf("Expected `busybox` or `sh`, found %s.", procs.Processes[1][pos]) - } -} - -func TestPools(t *testing.T) { - runtime := mkRuntime(t) - srv := &Server{ - runtime: runtime, - pullingPool: make(map[string]struct{}), - pushingPool: make(map[string]struct{}), - } - defer nuke(runtime) - - err := srv.poolAdd("pull", "test1") - if err != nil { - t.Fatal(err) - } - err = srv.poolAdd("pull", "test2") - if err != nil { - t.Fatal(err) - } - err = srv.poolAdd("push", "test1") - if err == nil || err.Error() != "pull test1 is already in progress" { - t.Fatalf("Expected `pull test1 is already in progress`") - } - err = srv.poolAdd("pull", "test1") - if err == nil || err.Error() != "pull test1 is already in progress" { - t.Fatalf("Expected `pull test1 is already in progress`") - } - err = srv.poolAdd("wait", "test3") - if err == nil || err.Error() != "Unknown pool type" { - t.Fatalf("Expected `Unknown pool type`") - } - - err = srv.poolRemove("pull", "test2") - if err != nil { - t.Fatal(err) - } - err = srv.poolRemove("pull", "test2") - if err != nil { - t.Fatal(err) - } - err = srv.poolRemove("pull", "test1") - if err != nil { - t.Fatal(err) - } - err = srv.poolRemove("push", "test1") - if err != nil { - t.Fatal(err) - } - err = srv.poolRemove("wait", "test3") - if err == nil || err.Error() != "Unknown pool type" { - t.Fatalf("Expected `Unknown pool type`") - } -} - -func TestLogEvent(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - srv := &Server{ - runtime: runtime, - events: make([]utils.JSONMessage, 0, 64), - listeners: make(map[string]chan utils.JSONMessage), - } - - srv.LogEvent("fakeaction", "fakeid", "fakeimage") - - listener := make(chan utils.JSONMessage) - srv.Lock() - srv.listeners["test"] = listener - srv.Unlock() - - srv.LogEvent("fakeaction2", "fakeid", "fakeimage") - - if len(srv.events) != 2 { - t.Fatalf("Expected 2 events, found %d", len(srv.events)) - } - go func() { - time.Sleep(200 * time.Millisecond) - srv.LogEvent("fakeaction3", "fakeid", "fakeimage") - time.Sleep(200 * time.Millisecond) - srv.LogEvent("fakeaction4", "fakeid", "fakeimage") - }() - - setTimeout(t, "Listening for events timed out", 2*time.Second, func() { - for i := 2; i < 4; i++ { - event := <-listener - if event != srv.events[i] { - t.Fatalf("Event received it different than expected") - } - } - }) -} - func 
TestRmi(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) - runtime := srv.runtime - defer nuke(runtime) + defer mkRuntimeFromEngine(eng, t).Nuke() initialImages, err := srv.Images(false, "") if err != nil { t.Fatal(err) } - config, hostConfig, _, err := ParseRun([]string{GetTestImage(runtime).ID, "echo test"}, nil) + config, hostConfig, _, err := docker.ParseRun([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } @@ -471,19 +316,19 @@ func TestRmi(t *testing.T) { } func TestImagesFilter(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) - srv := &Server{runtime: runtime} + srv := mkServerFromEngine(eng, t) - if err := srv.runtime.repositories.Set("utest", "tag1", unitTestImageName, false); err != nil { + if err := srv.ContainerTag(unitTestImageName, "utest", "tag1", false); err != nil { t.Fatal(err) } - if err := srv.runtime.repositories.Set("utest/docker", "tag2", unitTestImageName, false); err != nil { + if err := srv.ContainerTag(unitTestImageName, "utest/docker", "tag2", false); err != nil { t.Fatal(err) } - if err := srv.runtime.repositories.Set("utest:5000/docker", "tag3", unitTestImageName, false); err != nil { + if err := srv.ContainerTag(unitTestImageName, "utest:5000/docker", "tag3", false); err != nil { t.Fatal(err) } @@ -525,9 +370,9 @@ func TestImagesFilter(t *testing.T) { } func TestImageInsert(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - srv := &Server{runtime: runtime} + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) sf := utils.NewStreamFormatter(true) // bad image name fails @@ -536,12 +381,12 @@ func TestImageInsert(t *testing.T) { } // bad url fails - if err := srv.ImageInsert(GetTestImage(runtime).ID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil { + if err := srv.ImageInsert(unitTestImageID, "http://bad_host_name_that_will_totally_fail.com/", "/foo", ioutil.Discard, sf); err == nil { t.Fatal("expected an error and got none") } // success returns nil - if err := srv.ImageInsert(GetTestImage(runtime).ID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil { + if err := srv.ImageInsert(unitTestImageID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo", ioutil.Discard, sf); err != nil { t.Fatalf("expected no error, but got %v", err) } } diff --git a/integration/sorter_test.go b/integration/sorter_test.go new file mode 100644 index 0000000000..77848c7ddf --- /dev/null +++ b/integration/sorter_test.go @@ -0,0 +1,63 @@ +package docker + +import ( + "github.com/dotcloud/docker" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "testing" + "time" +) + +func TestServerListOrderedImagesByCreationDate(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) + + if err := generateImage("", srv); err != nil { + t.Fatal(err) + } + + images, err := srv.Images(true, "") + if err != nil { + t.Fatal(err) + } + + if images[0].Created < images[1].Created { + t.Error("Expected []APIImges to be ordered by most recent creation date.") + } +} + +func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) { + eng := NewTestEngine(t) + defer mkRuntimeFromEngine(eng, t).Nuke() + srv := mkServerFromEngine(eng, t) + + err := generateImage("bar", srv) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Second) + + 
err = generateImage("zed", srv) + if err != nil { + t.Fatal(err) + } + + images, err := srv.Images(true, "") + if err != nil { + t.Fatal(err) + } + + if images[0].RepoTags[0] != "repo:zed" && images[0].RepoTags[0] != "repo:bar" { + t.Errorf("Expected []APIImges to be ordered by most recent creation date. %s", images) + } +} + +func generateImage(name string, srv *docker.Server) error { + archive, err := fakeTar() + if err != nil { + return err + } + return srv.ImageImport("-", "repo", name, archive, ioutil.Discard, utils.NewStreamFormatter(true)) +} diff --git a/integration/utils_test.go b/integration/utils_test.go new file mode 100644 index 0000000000..278924edb7 --- /dev/null +++ b/integration/utils_test.go @@ -0,0 +1,328 @@ +package docker + +import ( + "archive/tar" + "bytes" + "fmt" + "github.com/dotcloud/docker" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/utils" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path" + "strings" + "testing" + "time" +) + +// This file contains utility functions for docker's unit test suite. +// It has to be named XXX_test.go, apparently, in other to access private functions +// from other XXX_test.go functions. + +// Create a temporary runtime suitable for unit testing. +// Call t.Fatal() at the first error. +func mkRuntime(f utils.Fataler) *docker.Runtime { + root, err := newTestDirectory(unitTestStoreBase) + if err != nil { + f.Fatal(err) + } + config := &docker.DaemonConfig{ + Root: root, + AutoRestart: false, + } + r, err := docker.NewRuntimeFromDirectory(config) + if err != nil { + f.Fatal(err) + } + r.UpdateCapabilities(true) + return r +} + +func createNamedTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler, name string) (shortId string) { + job := eng.Job("create", name) + if err := job.ImportEnv(config); err != nil { + f.Fatal(err) + } + job.StdoutParseString(&shortId) + if err := job.Run(); err != nil { + f.Fatal(err) + } + return +} + +func createTestContainer(eng *engine.Engine, config *docker.Config, f utils.Fataler) (shortId string) { + return createNamedTestContainer(eng, config, f, "") +} + +func startContainer(eng *engine.Engine, id string, t utils.Fataler) { + job := eng.Job("start", id) + if err := job.Run(); err != nil { + t.Fatal(err) + } +} + +func containerRun(eng *engine.Engine, id string, t utils.Fataler) { + startContainer(eng, id, t) + containerWait(eng, id, t) +} + +func containerFileExists(eng *engine.Engine, id, dir string, t utils.Fataler) bool { + c := getContainer(eng, id, t) + if err := c.EnsureMounted(); err != nil { + t.Fatal(err) + } + if _, err := os.Stat(path.Join(c.RootfsPath(), dir)); err != nil { + if os.IsNotExist(err) { + return false + } + t.Fatal(err) + } + return true +} + +func containerAttach(eng *engine.Engine, id string, t utils.Fataler) (io.WriteCloser, io.ReadCloser) { + c := getContainer(eng, id, t) + i, err := c.StdinPipe() + if err != nil { + t.Fatal(err) + } + o, err := c.StdoutPipe() + if err != nil { + t.Fatal(err) + } + return i, o +} + +func containerWait(eng *engine.Engine, id string, t utils.Fataler) int { + return getContainer(eng, id, t).Wait() +} + +func containerWaitTimeout(eng *engine.Engine, id string, t utils.Fataler) error { + return getContainer(eng, id, t).WaitTimeout(500 * time.Millisecond) +} + +func containerKill(eng *engine.Engine, id string, t utils.Fataler) { + if err := getContainer(eng, id, t).Kill(); err != nil { + t.Fatal(err) + } +} + +func containerRunning(eng *engine.Engine, id string, t utils.Fataler) 
bool { + return getContainer(eng, id, t).State.Running +} + +func containerAssertExists(eng *engine.Engine, id string, t utils.Fataler) { + getContainer(eng, id, t) +} + +func containerAssertNotExists(eng *engine.Engine, id string, t utils.Fataler) { + runtime := mkRuntimeFromEngine(eng, t) + if c := runtime.Get(id); c != nil { + t.Fatal(fmt.Errorf("Container %s should not exist", id)) + } +} + +// assertHttpNotError expect the given response to not have an error. +// Otherwise the it causes the test to fail. +func assertHttpNotError(r *httptest.ResponseRecorder, t utils.Fataler) { + // Non-error http status are [200, 400) + if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest { + t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code)) + } +} + +// assertHttpError expect the given response to have an error. +// Otherwise the it causes the test to fail. +func assertHttpError(r *httptest.ResponseRecorder, t utils.Fataler) { + // Non-error http status are [200, 400) + if !(r.Code < http.StatusOK || r.Code >= http.StatusBadRequest) { + t.Fatal(fmt.Errorf("Unexpected http success code: %v", r.Code)) + } +} + +func getContainer(eng *engine.Engine, id string, t utils.Fataler) *docker.Container { + runtime := mkRuntimeFromEngine(eng, t) + c := runtime.Get(id) + if c == nil { + t.Fatal(fmt.Errorf("No such container: %s", id)) + } + return c +} + +func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Server { + iSrv := eng.Hack_GetGlobalVar("httpapi.server") + if iSrv == nil { + panic("Legacy server field not set in engine") + } + srv, ok := iSrv.(*docker.Server) + if !ok { + panic("Legacy server field in engine does not cast to *docker.Server") + } + return srv +} + +func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Runtime { + iRuntime := eng.Hack_GetGlobalVar("httpapi.runtime") + if iRuntime == nil { + panic("Legacy runtime field not set in engine") + } + runtime, ok := iRuntime.(*docker.Runtime) + if !ok { + panic("Legacy runtime field in engine does not cast to *docker.Runtime") + } + return runtime +} + +func NewTestEngine(t utils.Fataler) *engine.Engine { + root, err := newTestDirectory(unitTestStoreBase) + if err != nil { + t.Fatal(err) + } + eng, err := engine.New(root) + if err != nil { + t.Fatal(err) + } + // Load default plugins + // (This is manually copied and modified from main() until we have a more generic plugin system) + job := eng.Job("initapi") + job.Setenv("Root", root) + job.SetenvBool("AutoRestart", false) + // TestGetEnabledCors and TestOptionsRoute require EnableCors=true + job.SetenvBool("EnableCors", true) + if err := job.Run(); err != nil { + t.Fatal(err) + } + return eng +} + +func newTestDirectory(templateDir string) (dir string, err error) { + return utils.TestDirectory(templateDir) +} + +func getCallerName(depth int) string { + return utils.GetCallerName(depth) +} + +// Write `content` to the file at path `dst`, creating it if necessary, +// as well as any missing directories. +// The file is truncated if it already exists. +// Call t.Fatal() at the first error. 
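// Illustrative sketch, not part of the patch: the assertHttpNotError and
// assertHttpError helpers above operate on the standard library's
// httptest.ResponseRecorder. A hypothetical API test would use them roughly as
// follows; "apiHandler" is a stand-in for whatever http.Handler the test
// exercises, not a symbol introduced by this patch.
r := httptest.NewRecorder()
req, err := http.NewRequest("GET", "/containers/json", nil)
if err != nil {
	t.Fatal(err)
}
apiHandler.ServeHTTP(r, req)
assertHttpNotError(r, t) // fails the test on any status outside [200, 400)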
+func writeFile(dst, content string, t *testing.T) { + // Create subdirectories if necessary + if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) { + t.Fatal(err) + } + f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) + if err != nil { + t.Fatal(err) + } + // Write content (truncate if it exists) + if _, err := io.Copy(f, strings.NewReader(content)); err != nil { + t.Fatal(err) + } +} + +// Return the contents of file at path `src`. +// Call t.Fatal() at the first error (including if the file doesn't exist) +func readFile(src string, t *testing.T) (content string) { + f, err := os.Open(src) + if err != nil { + t.Fatal(err) + } + data, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + return string(data) +} + +// Create a test container from the given runtime `r` and run arguments `args`. +// If the image name is "_", (eg. []string{"-i", "-t", "_", "bash"}, it is +// dynamically replaced by the current test image. +// The caller is responsible for destroying the container. +// Call t.Fatal() at the first error. +func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, *docker.HostConfig, error) { + config, hc, _, err := docker.ParseRun(args, nil) + defer func() { + if err != nil && t != nil { + t.Fatal(err) + } + }() + if err != nil { + return nil, nil, err + } + if config.Image == "_" { + config.Image = GetTestImage(r).ID + } + c, _, err := r.Create(config, "") + if err != nil { + return nil, nil, err + } + // NOTE: hostConfig is ignored. + // If `args` specify privileged mode, custom lxc conf, external mount binds, + // port redirects etc. they will be ignored. + // This is because the correct way to set these things is to pass environment + // to the `start` job. + // FIXME: this helper function should be deprecated in favor of calling + // `create` and `start` jobs directly. + return c, hc, nil +} + +// Create a test container, start it, wait for it to complete, destroy it, +// and return its standard output as a string. +// The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image. +// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally. +func runContainer(eng *engine.Engine, r *docker.Runtime, args []string, t *testing.T) (output string, err error) { + defer func() { + if err != nil && t != nil { + t.Fatal(err) + } + }() + container, hc, err := mkContainer(r, args, t) + if err != nil { + return "", err + } + defer r.Destroy(container) + stdout, err := container.StdoutPipe() + if err != nil { + return "", err + } + defer stdout.Close() + + job := eng.Job("start", container.ID) + if err := job.ImportEnv(hc); err != nil { + return "", err + } + if err := job.Run(); err != nil { + return "", err + } + + container.Wait() + data, err := ioutil.ReadAll(stdout) + if err != nil { + return "", err + } + output = string(data) + return +} + +// FIXME: this is duplicated from graph_test.go in the docker package. 
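// Illustrative sketch, not part of the patch: fakeTar(), defined next, builds a
// small in-memory tarball that the integration tests use as a stand-in image
// payload; graph_test.go hands it to Graph.Create and sorter_test.go to
// Server.ImageImport, roughly like this ("repo" and "sometag" are arbitrary
// test names, and srv comes from mkServerFromEngine above).
archive, err := fakeTar()
if err != nil {
	t.Fatal(err)
}
if err := srv.ImageImport("-", "repo", "sometag", archive, ioutil.Discard, utils.NewStreamFormatter(true)); err != nil {
	t.Fatal(err)
}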
+func fakeTar() (io.Reader, error) { + content := []byte("Hello world!\n") + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} { + hdr := new(tar.Header) + hdr.Size = int64(len(content)) + hdr.Name = name + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + tw.Write([]byte(content)) + } + tw.Close() + return buf, nil +} diff --git a/z_final_test.go b/integration/z_final_test.go similarity index 100% rename from z_final_test.go rename to integration/z_final_test.go diff --git a/iptables/iptables_test.go b/iptables/iptables_test.go deleted file mode 100644 index 886a63c03f..0000000000 --- a/iptables/iptables_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package iptables - -import ( - "os" - "testing" -) - -func TestIptables(t *testing.T) { - if _, err := Raw("-L"); err != nil { - t.Fatal(err) - } - path := os.Getenv("PATH") - os.Setenv("PATH", "") - defer os.Setenv("PATH", path) - if _, err := Raw("-L"); err == nil { - t.Fatal("Not finding iptables in the PATH should cause an error") - } -} diff --git a/lxc_template.go b/lxc_template.go index aacca0b01f..2ba2867428 100644 --- a/lxc_template.go +++ b/lxc_template.go @@ -120,7 +120,7 @@ lxc.aa_profile = unconfined # (Note: 'lxc.cap.keep' is coming soon and should replace this under the # security principle 'deny all unless explicitly permitted', see # http://sourceforge.net/mailarchive/message.php?msg_id=31054627 ) -lxc.cap.drop = audit_control audit_write mac_admin mac_override mknod setpcap sys_admin sys_boot sys_module sys_nice sys_pacct sys_rawio sys_resource sys_time sys_tty_config +lxc.cap.drop = audit_control audit_write mac_admin mac_override mknod setpcap sys_admin sys_module sys_nice sys_pacct sys_rawio sys_resource sys_time sys_tty_config {{end}} # limits diff --git a/lxc_template_unit_test.go b/lxc_template_unit_test.go new file mode 100644 index 0000000000..ce5af1d321 --- /dev/null +++ b/lxc_template_unit_test.go @@ -0,0 +1,102 @@ +package docker + +import ( + "bufio" + "fmt" + "io/ioutil" + "math/rand" + "os" + "strings" + "testing" + "time" +) + +func TestLXCConfig(t *testing.T) { + root, err := ioutil.TempDir("", "TestLXCConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + // Memory is allocated randomly for testing + rand.Seed(time.Now().UTC().UnixNano()) + memMin := 33554432 + memMax := 536870912 + mem := memMin + rand.Intn(memMax-memMin) + // CPU shares as well + cpuMin := 100 + cpuMax := 10000 + cpu := cpuMin + rand.Intn(cpuMax-cpuMin) + container := &Container{ + root: root, + Config: &Config{ + Hostname: "foobar", + Memory: int64(mem), + CpuShares: int64(cpu), + NetworkDisabled: true, + }, + hostConfig: &HostConfig{ + Privileged: false, + }, + } + if err := container.generateLXCConfig(); err != nil { + t.Fatal(err) + } + grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar") + grepFile(t, container.lxcConfigPath(), + fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem)) + grepFile(t, container.lxcConfigPath(), + fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2)) +} + +func TestCustomLxcConfig(t *testing.T) { + root, err := ioutil.TempDir("", "TestCustomLxcConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + container := &Container{ + root: root, + Config: &Config{ + Hostname: "foobar", + NetworkDisabled: true, + }, + hostConfig: &HostConfig{ + Privileged: false, + LxcConf: []KeyValuePair{ + { + Key: "lxc.utsname", + Value: 
"docker", + }, + { + Key: "lxc.cgroup.cpuset.cpus", + Value: "0,1", + }, + }, + }, + } + if err := container.generateLXCConfig(); err != nil { + t.Fatal(err) + } + grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker") + grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1") +} + +func grepFile(t *testing.T, path string, pattern string) { + f, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := bufio.NewReader(f) + var ( + line string + ) + err = nil + for err == nil { + line, err = r.ReadString('\n') + if strings.Contains(line, pattern) == true { + return + } + } + t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path) +} diff --git a/runtime.go b/runtime.go index 1146079070..e1ce7551ea 100644 --- a/runtime.go +++ b/runtime.go @@ -1,7 +1,7 @@ package docker import ( - _ "code.google.com/p/gosqlite/sqlite3" + _ "code.google.com/p/gosqlite/sqlite3" // registers sqlite "container/list" "database/sql" "fmt" @@ -20,6 +20,7 @@ import ( "path" "sort" "strings" + "sync" "time" ) @@ -417,7 +418,8 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin // Set the enitity in the graph using the default name specified if _, err := runtime.containerGraph.Set(name, id); err != nil { if strings.HasSuffix(err.Error(), "name are not unique") { - return nil, nil, fmt.Errorf("Conflict, %s already exists.", name) + conflictingContainer, _ := runtime.GetByName(name) + return nil, nil, fmt.Errorf("Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", name, utils.TruncateID(conflictingContainer.ID), name) } return nil, nil, err } @@ -548,7 +550,12 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a return img, nil } +// FIXME: this is deprecated by the getFullName *function* func (runtime *Runtime) getFullName(name string) (string, error) { + return getFullName(name) +} + +func getFullName(name string) (string, error) { if name == "" { return "", fmt.Errorf("Container name cannot be empty") } @@ -762,6 +769,25 @@ func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) { return archive.ExportChanges(cDir, changes) } +// Nuke kills all containers then removes all content +// from the content root, including images, volumes and +// container filesystems. +// Again: this will remove your entire docker runtime! +func (runtime *Runtime) Nuke() error { + var wg sync.WaitGroup + for _, container := range runtime.List() { + wg.Add(1) + go func(c *Container) { + c.Kill() + wg.Done() + }(container) + } + wg.Wait() + runtime.Close() + + return os.RemoveAll(runtime.config.Root) +} + func linkLxcStart(root string) error { sourcePath, err := exec.LookPath("lxc-start") if err != nil { @@ -779,6 +805,14 @@ func linkLxcStart(root string) error { return os.Symlink(sourcePath, targetPath) } +// FIXME: this is a convenience function for integration tests +// which need direct access to runtime.graph. +// Once the tests switch to using engine and jobs, this method +// can go away. +func (runtime *Runtime) Graph() *Graph { + return runtime.graph +} + // History is a convenience type for storing a list of containers, // ordered by creation date. 
type History []*Container diff --git a/server.go b/server.go index 4852e2133f..c49844e42b 100644 --- a/server.go +++ b/server.go @@ -62,6 +62,8 @@ func jobInitApi(job *engine.Job) string { os.Exit(0) }() job.Eng.Hack_SetGlobalVar("httpapi.server", srv) + job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime) + job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", srv.runtime.networkManager.bridgeNetwork.IP) if err := job.Eng.Register("create", srv.ContainerCreate); err != nil { return err.Error() } @@ -422,9 +424,9 @@ func (srv *Server) ImageHistory(name string) ([]APIHistory, error) { } -func (srv *Server) ContainerTop(name, ps_args string) (*APITop, error) { +func (srv *Server) ContainerTop(name, psArgs string) (*APITop, error) { if container := srv.runtime.Get(name); container != nil { - output, err := exec.Command("lxc-ps", "--name", container.ID, "--", ps_args).CombinedOutput() + output, err := exec.Command("lxc-ps", "--name", container.ID, "--", psArgs).CombinedOutput() if err != nil { return nil, fmt.Errorf("lxc-ps: %s (%s)", err, output) } @@ -532,6 +534,7 @@ func (srv *Server) ContainerCommit(name, repo, tag, author, comment string, conf return img.ID, err } +// FIXME: this should be called ImageTag func (srv *Server) ContainerTag(name, repo, tag string, force bool) error { if err := srv.runtime.repositories.Set(repo, tag, name, force); err != nil { return err @@ -891,12 +894,13 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", elem.ID)) continue } - if checksum, err := srv.pushImage(r, out, remoteName, elem.ID, ep, repoData.Tokens, sf); err != nil { + checksum, err := srv.pushImage(r, out, remoteName, elem.ID, ep, repoData.Tokens, sf) + if err != nil { // FIXME: Continue on error? return err - } else { - elem.Checksum = checksum } + elem.Checksum = checksum + if err := pushTags(); err != nil { return err } @@ -936,13 +940,15 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, if err != nil { return "", fmt.Errorf("Failed to generate layer archive: %s", err) } + defer os.RemoveAll(layerData.Name()) // Send the layer - if checksum, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf.FormatProgress("", "Pushing", "%8v/%v (%v)"), sf, false), ep, token, jsonRaw); err != nil { + checksum, err = r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf.FormatProgress("", "Pushing", "%8v/%v (%v)"), sf, false), ep, token, jsonRaw) + if err != nil { return "", err - } else { - imgData.Checksum = checksum } + imgData.Checksum = checksum + out.Write(sf.FormatStatus("", "")) // Send the checksum @@ -1065,7 +1071,12 @@ func (srv *Server) ContainerCreate(job *engine.Job) string { return err.Error() } srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image)) - job.Printf("%s\n", container.ID) + // FIXME: this is necessary because runtime.Create might return a nil container + // with a non-nil error. This should not happen! Once it's fixed we + // can remove this workaround. 
+ if container != nil { + job.Printf("%s\n", container.ID) + } for _, warning := range buildWarnings { job.Errorf("%s\n", warning) } @@ -1603,7 +1614,7 @@ func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HT return srv.reqFactory } -func (srv *Server) LogEvent(action, id, from string) { +func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage { now := time.Now().Unix() jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now} srv.events = append(srv.events, jm) @@ -1613,6 +1624,7 @@ func (srv *Server) LogEvent(action, id, from string) { default: } } + return &jm } type Server struct { diff --git a/server_unit_test.go b/server_unit_test.go new file mode 100644 index 0000000000..a51e2ddff5 --- /dev/null +++ b/server_unit_test.go @@ -0,0 +1,109 @@ +package docker + +import ( + "github.com/dotcloud/docker/utils" + "testing" + "time" +) + +func TestPools(t *testing.T) { + srv := &Server{ + pullingPool: make(map[string]struct{}), + pushingPool: make(map[string]struct{}), + } + + err := srv.poolAdd("pull", "test1") + if err != nil { + t.Fatal(err) + } + err = srv.poolAdd("pull", "test2") + if err != nil { + t.Fatal(err) + } + err = srv.poolAdd("push", "test1") + if err == nil || err.Error() != "pull test1 is already in progress" { + t.Fatalf("Expected `pull test1 is already in progress`") + } + err = srv.poolAdd("pull", "test1") + if err == nil || err.Error() != "pull test1 is already in progress" { + t.Fatalf("Expected `pull test1 is already in progress`") + } + err = srv.poolAdd("wait", "test3") + if err == nil || err.Error() != "Unknown pool type" { + t.Fatalf("Expected `Unknown pool type`") + } + + err = srv.poolRemove("pull", "test2") + if err != nil { + t.Fatal(err) + } + err = srv.poolRemove("pull", "test2") + if err != nil { + t.Fatal(err) + } + err = srv.poolRemove("pull", "test1") + if err != nil { + t.Fatal(err) + } + err = srv.poolRemove("push", "test1") + if err != nil { + t.Fatal(err) + } + err = srv.poolRemove("wait", "test3") + if err == nil || err.Error() != "Unknown pool type" { + t.Fatalf("Expected `Unknown pool type`") + } +} + +func TestLogEvent(t *testing.T) { + srv := &Server{ + events: make([]utils.JSONMessage, 0, 64), + listeners: make(map[string]chan utils.JSONMessage), + } + + srv.LogEvent("fakeaction", "fakeid", "fakeimage") + + listener := make(chan utils.JSONMessage) + srv.Lock() + srv.listeners["test"] = listener + srv.Unlock() + + srv.LogEvent("fakeaction2", "fakeid", "fakeimage") + + if len(srv.events) != 2 { + t.Fatalf("Expected 2 events, found %d", len(srv.events)) + } + go func() { + time.Sleep(200 * time.Millisecond) + srv.LogEvent("fakeaction3", "fakeid", "fakeimage") + time.Sleep(200 * time.Millisecond) + srv.LogEvent("fakeaction4", "fakeid", "fakeimage") + }() + + setTimeout(t, "Listening for events timed out", 2*time.Second, func() { + for i := 2; i < 4; i++ { + event := <-listener + if event != srv.events[i] { + t.Fatalf("Event received it different than expected") + } + } + }) +} + +// FIXME: this is duplicated from integration/commands_test.go +func setTimeout(t *testing.T, msg string, d time.Duration, f func()) { + c := make(chan bool) + + // Make sure we are not too long + go func() { + time.Sleep(d) + c <- true + }() + go func() { + f() + c <- false + }() + if <-c && msg != "" { + t.Fatal(msg) + } +} diff --git a/sorter_test.go b/sorter_test.go deleted file mode 100644 index 54f647132f..0000000000 --- a/sorter_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package docker - -import ( - "fmt" - 
"testing" - "time" -) - -func TestServerListOrderedImagesByCreationDate(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - archive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - _, err = runtime.graph.Create(archive, nil, "Testing", "", nil) - if err != nil { - t.Fatal(err) - } - - srv := &Server{runtime: runtime} - - images, err := srv.Images(true, "") - if err != nil { - t.Fatal(err) - } - - if images[0].Created < images[1].Created { - t.Error("Expected []APIImges to be ordered by most recent creation date.") - } -} - -func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - err := generateImage("bar", runtime) - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second) - - err = generateImage("zed", runtime) - if err != nil { - t.Fatal(err) - } - - srv := &Server{runtime: runtime} - images, err := srv.Images(true, "") - if err != nil { - t.Fatal(err) - } - - if images[0].RepoTags[0] != "repo:zed" && images[0].RepoTags[0] != "repo:bar" { - t.Errorf("Expected []APIImges to be ordered by most recent creation date. %s", images) - } -} - -func generateImage(name string, runtime *Runtime) error { - - archive, err := fakeTar() - if err != nil { - return err - } - image, err := runtime.graph.Create(archive, nil, "Testing", "", nil) - if err != nil { - return err - } - - srv := &Server{runtime: runtime} - srv.ContainerTag(image.ID, "repo", name, false) - - return nil -} - -func TestSortUniquePorts(t *testing.T) { - ports := []Port{ - Port("6379/tcp"), - Port("22/tcp"), - } - - sortPorts(ports, func(ip, jp Port) bool { - return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") - }) - - first := ports[0] - if fmt.Sprint(first) != "22/tcp" { - t.Log(fmt.Sprint(first)) - t.Fail() - } -} - -func TestSortSamePortWithDifferentProto(t *testing.T) { - ports := []Port{ - Port("8888/tcp"), - Port("8888/udp"), - Port("6379/tcp"), - Port("6379/udp"), - } - - sortPorts(ports, func(ip, jp Port) bool { - return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") - }) - - first := ports[0] - if fmt.Sprint(first) != "6379/tcp" { - t.Fail() - } -} diff --git a/sorter_unit_test.go b/sorter_unit_test.go new file mode 100644 index 0000000000..0669feedb3 --- /dev/null +++ b/sorter_unit_test.go @@ -0,0 +1,41 @@ +package docker + +import ( + "fmt" + "testing" +) + +func TestSortUniquePorts(t *testing.T) { + ports := []Port{ + Port("6379/tcp"), + Port("22/tcp"), + } + + sortPorts(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "22/tcp" { + t.Log(fmt.Sprint(first)) + t.Fail() + } +} + +func TestSortSamePortWithDifferentProto(t *testing.T) { + ports := []Port{ + Port("8888/tcp"), + Port("8888/udp"), + Port("6379/tcp"), + Port("6379/udp"), + } + + sortPorts(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "6379/tcp" { + t.Fail() + } +} diff --git a/tags_test.go b/tags_test.go deleted file mode 100644 index d920943795..0000000000 --- a/tags_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package docker - -import ( - "testing" -) - -func TestLookupImage(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none 
found") - } - - if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + DEFAULTTAG); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none found") - } - - if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + "fail"); err == nil { - t.Errorf("Expected error, none found") - } else if img != nil { - t.Errorf("Expected 0 image, 1 found") - } - - if img, err := runtime.repositories.LookupImage("fail:fail"); err == nil { - t.Errorf("Expected error, none found") - } else if img != nil { - t.Errorf("Expected 0 image, 1 found") - } - - if img, err := runtime.repositories.LookupImage(unitTestImageID); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none found") - } - - if img, err := runtime.repositories.LookupImage(unitTestImageName + ":" + unitTestImageID); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none found") - } -} diff --git a/tags_unit_test.go b/tags_unit_test.go new file mode 100644 index 0000000000..bd8622d46d --- /dev/null +++ b/tags_unit_test.go @@ -0,0 +1,85 @@ +package docker + +import ( + "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/utils" + "os" + "path" + "testing" +) + +const ( + testImageName = "myapp" + testImageID = "foo" +) + +func mkTestTagStore(root string, t *testing.T) *TagStore { + driver, err := graphdriver.New(root) + if err != nil { + t.Fatal(err) + } + graph, err := NewGraph(root, driver) + if err != nil { + t.Fatal(err) + } + store, err := NewTagStore(path.Join(root, "tags"), graph) + if err != nil { + t.Fatal(err) + } + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + img := &Image{ID: testImageID} + if err := graph.Register(nil, archive, img); err != nil { + t.Fatal(err) + } + if err := store.Set(testImageName, "", testImageID, false); err != nil { + t.Fatal(err) + } + return store +} + +func TestLookupImage(t *testing.T) { + tmp, err := utils.TestDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + store := mkTestTagStore(tmp, t) + + if img, err := store.LookupImage(testImageName); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + if img, err := store.LookupImage(testImageName + ":" + DEFAULTTAG); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + + if img, err := store.LookupImage(testImageName + ":" + "fail"); err == nil { + t.Errorf("Expected error, none found") + } else if img != nil { + t.Errorf("Expected 0 image, 1 found") + } + + if img, err := store.LookupImage("fail:fail"); err == nil { + t.Errorf("Expected error, none found") + } else if img != nil { + t.Errorf("Expected 0 image, 1 found") + } + + if img, err := store.LookupImage(testImageID); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + + if img, err := store.LookupImage(testImageName + ":" + testImageID); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } +} diff --git a/utils/http.go b/utils/http.go index 1332ce816d..5eb77d1949 100644 --- a/utils/http.go +++ b/utils/http.go @@ -81,12 +81,12 @@ func NewHTTPUserAgentDecorator(versions ...VersionInfo) HTTPRequestDecorator { return ret } -func (self *HTTPUserAgentDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) { +func (h *HTTPUserAgentDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err 
error) { if req == nil { return req, nil } - userAgent := appendVersions(req.UserAgent(), self.versions...) + userAgent := appendVersions(req.UserAgent(), h.versions...) if len(userAgent) > 0 { req.Header.Set("User-Agent", userAgent) } @@ -97,11 +97,11 @@ type HTTPMetaHeadersDecorator struct { Headers map[string][]string } -func (self *HTTPMetaHeadersDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) { - if self.Headers == nil { +func (h *HTTPMetaHeadersDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) { + if h.Headers == nil { return req, nil } - for k, v := range self.Headers { + for k, v := range h.Headers { req.Header[k] = v } return req, nil @@ -114,25 +114,25 @@ type HTTPRequestFactory struct { } func NewHTTPRequestFactory(d ...HTTPRequestDecorator) *HTTPRequestFactory { - ret := new(HTTPRequestFactory) - ret.decorators = d - return ret + return &HTTPRequestFactory{ + decorators: d, + } } // NewRequest() creates a new *http.Request, // applies all decorators in the HTTPRequestFactory on the request, // then applies decorators provided by d on the request. -func (self *HTTPRequestFactory) NewRequest(method, urlStr string, body io.Reader, d ...HTTPRequestDecorator) (*http.Request, error) { +func (h *HTTPRequestFactory) NewRequest(method, urlStr string, body io.Reader, d ...HTTPRequestDecorator) (*http.Request, error) { req, err := http.NewRequest(method, urlStr, body) if err != nil { return nil, err } // By default, a nil factory should work. - if self == nil { + if h == nil { return req, nil } - for _, dec := range self.decorators { + for _, dec := range h.decorators { req, err = dec.ChangeRequest(req) if err != nil { return nil, err diff --git a/utils/utils.go b/utils/utils.go index d16ffe3171..5864add8e1 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -1123,7 +1123,7 @@ func (graph *DependencyGraph) GenerateTraversalMap() ([][]string, error) { for len(processed) < len(graph.nodes) { // Use a temporary buffer for processed nodes, otherwise // nodes that depend on each other could end up in the same round. - tmp_processed := []*DependencyNode{} + tmpProcessed := []*DependencyNode{} for _, node := range graph.nodes { // If the node has more dependencies than what we have cleared, // it won't be valid for this round. @@ -1137,7 +1137,7 @@ func (graph *DependencyGraph) GenerateTraversalMap() ([][]string, error) { // It's not been processed yet and has 0 deps. Add it! // (this is a shortcut for what we're doing below) if node.Degree() == 0 { - tmp_processed = append(tmp_processed, node) + tmpProcessed = append(tmpProcessed, node) continue } // If at least one dep hasn't been processed yet, we can't @@ -1151,17 +1151,17 @@ func (graph *DependencyGraph) GenerateTraversalMap() ([][]string, error) { } // All deps have already been processed. Add it! if ok { - tmp_processed = append(tmp_processed, node) + tmpProcessed = append(tmpProcessed, node) } } - Debugf("Round %d: found %d available nodes", len(result), len(tmp_processed)) + Debugf("Round %d: found %d available nodes", len(result), len(tmpProcessed)) // If no progress has been made this round, // that means we have circular dependencies. 
- if len(tmp_processed) == 0 { + if len(tmpProcessed) == 0 { return nil, fmt.Errorf("Could not find a solution to this dependency graph") } round := []string{} - for _, nd := range tmp_processed { + for _, nd := range tmpProcessed { round = append(round, nd.id) processed[nd] = true } @@ -1242,3 +1242,40 @@ func PartParser(template, data string) (map[string]string, error) { } return out, nil } + +var globalTestID string + +// TestDirectory creates a new temporary directory and returns its path. +// The contents of directory at path `templateDir` is copied into the +// new directory. +func TestDirectory(templateDir string) (dir string, err error) { + if globalTestID == "" { + globalTestID = RandomString()[:4] + } + prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) + if prefix == "" { + prefix = "docker-test-" + } + dir, err = ioutil.TempDir("", prefix) + if err = os.Remove(dir); err != nil { + return + } + if templateDir != "" { + if err = CopyDirectory(templateDir, dir); err != nil { + return + } + } + return +} + +// GetCallerName introspects the call stack and returns the name of the +// function `depth` levels down in the stack. +func GetCallerName(depth int) string { + // Use the caller function name as a prefix. + // This helps trace temp directories back to their test. + pc, _, _, _ := runtime.Caller(depth + 1) + callerLongName := runtime.FuncForPC(pc).Name() + parts := strings.Split(callerLongName, ".") + callerShortName := parts[len(parts)-1] + return callerShortName +} diff --git a/utils_test.go b/utils_test.go deleted file mode 100644 index a9678a9bbd..0000000000 --- a/utils_test.go +++ /dev/null @@ -1,493 +0,0 @@ -package docker - -import ( - "fmt" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "os" - "path" - "runtime" - "strings" - "testing" -) - -// This file contains utility functions for docker's unit test suite. -// It has to be named XXX_test.go, apparently, in other to access private functions -// from other XXX_test.go functions. - -var globalTestID string - -// Create a temporary runtime suitable for unit testing. -// Call t.Fatal() at the first error. 
-func mkRuntime(f utils.Fataler) *Runtime { - root, err := newTestDirectory(unitTestStoreBase) - if err != nil { - f.Fatal(err) - } - config := &DaemonConfig{ - Root: root, - AutoRestart: false, - } - r, err := NewRuntimeFromDirectory(config) - if err != nil { - f.Fatal(err) - } - r.UpdateCapabilities(true) - return r -} - -func createNamedTestContainer(eng *engine.Engine, config *Config, f utils.Fataler, name string) (shortId string) { - job := eng.Job("create", name) - if err := job.ImportEnv(config); err != nil { - f.Fatal(err) - } - job.StdoutParseString(&shortId) - if err := job.Run(); err != nil { - f.Fatal(err) - } - return -} - -func createTestContainer(eng *engine.Engine, config *Config, f utils.Fataler) (shortId string) { - return createNamedTestContainer(eng, config, f, "") -} - -func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *Server { - iSrv := eng.Hack_GetGlobalVar("httpapi.server") - if iSrv == nil { - panic("Legacy server field not set in engine") - } - srv, ok := iSrv.(*Server) - if !ok { - panic("Legacy server field in engine does not cast to *Server") - } - return srv -} - -func NewTestEngine(t utils.Fataler) *engine.Engine { - root, err := newTestDirectory(unitTestStoreBase) - if err != nil { - t.Fatal(err) - } - eng, err := engine.New(root) - if err != nil { - t.Fatal(err) - } - // Load default plugins - // (This is manually copied and modified from main() until we have a more generic plugin system) - job := eng.Job("initapi") - job.Setenv("Root", root) - job.SetenvBool("AutoRestart", false) - if err := job.Run(); err != nil { - t.Fatal(err) - } - return eng -} - -func newTestDirectory(templateDir string) (dir string, err error) { - if globalTestID == "" { - globalTestID = GenerateID()[:4] - } - prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, getCallerName(2)) - if prefix == "" { - prefix = "docker-test-" - } - dir, err = ioutil.TempDir("", prefix) - if err = os.Remove(dir); err != nil { - return - } - if err = utils.CopyDirectory(templateDir, dir); err != nil { - return - } - return -} - -func getCallerName(depth int) string { - // Use the caller function name as a prefix. - // This helps trace temp directories back to their test. - pc, _, _, _ := runtime.Caller(depth + 1) - callerLongName := runtime.FuncForPC(pc).Name() - parts := strings.Split(callerLongName, ".") - callerShortName := parts[len(parts)-1] - return callerShortName -} - -// Write `content` to the file at path `dst`, creating it if necessary, -// as well as any missing directories. -// The file is truncated if it already exists. -// Call t.Fatal() at the first error. -func writeFile(dst, content string, t *testing.T) { - // Create subdirectories if necessary - if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) { - t.Fatal(err) - } - f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) - if err != nil { - t.Fatal(err) - } - // Write content (truncate if it exists) - if _, err := io.Copy(f, strings.NewReader(content)); err != nil { - t.Fatal(err) - } -} - -// Return the contents of file at path `src`. -// Call t.Fatal() at the first error (including if the file doesn't exist) -func readFile(src string, t *testing.T) (content string) { - f, err := os.Open(src) - if err != nil { - t.Fatal(err) - } - data, err := ioutil.ReadAll(f) - if err != nil { - t.Fatal(err) - } - return string(data) -} - -// Create a test container from the given runtime `r` and run arguments `args`. -// If the image name is "_", (eg. 
[]string{"-i", "-t", "_", "bash"}, it is -// dynamically replaced by the current test image. -// The caller is responsible for destroying the container. -// Call t.Fatal() at the first error. -func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, error) { - config, hostConfig, _, err := ParseRun(args, nil) - defer func() { - if err != nil && t != nil { - t.Fatal(err) - } - }() - if err != nil { - return nil, err - } - if config.Image == "_" { - config.Image = GetTestImage(r).ID - } - c, _, err := r.Create(config, "") - if err != nil { - return nil, err - } - c.hostConfig = hostConfig - return c, nil -} - -// Create a test container, start it, wait for it to complete, destroy it, -// and return its standard output as a string. -// The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image. -// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally. -func runContainer(r *Runtime, args []string, t *testing.T) (output string, err error) { - defer func() { - if err != nil && t != nil { - t.Fatal(err) - } - }() - container, err := mkContainer(r, args, t) - if err != nil { - return "", err - } - defer r.Destroy(container) - stdout, err := container.StdoutPipe() - if err != nil { - return "", err - } - defer stdout.Close() - if err := container.Start(); err != nil { - return "", err - } - container.Wait() - data, err := ioutil.ReadAll(stdout) - if err != nil { - return "", err - } - output = string(data) - return -} - -func TestCompareConfig(t *testing.T) { - volumes1 := make(map[string]struct{}) - volumes1["/test1"] = struct{}{} - config1 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"1111:1111", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes1, - } - config2 := Config{ - Dns: []string{"0.0.0.0", "2.2.2.2"}, - PortSpecs: []string{"1111:1111", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes1, - } - config3 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"0000:0000", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes1, - } - config4 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"0000:0000", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "22222222", - Volumes: volumes1, - } - volumes2 := make(map[string]struct{}) - volumes2["/test2"] = struct{}{} - config5 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"0000:0000", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes2, - } - if CompareConfig(&config1, &config2) { - t.Fatalf("CompareConfig should return false, Dns are different") - } - if CompareConfig(&config1, &config3) { - t.Fatalf("CompareConfig should return false, PortSpecs are different") - } - if CompareConfig(&config1, &config4) { - t.Fatalf("CompareConfig should return false, VolumesFrom are different") - } - if CompareConfig(&config1, &config5) { - t.Fatalf("CompareConfig should return false, Volumes are different") - } - if !CompareConfig(&config1, &config1) { - t.Fatalf("CompareConfig should return true") - } -} - -func TestMergeConfig(t *testing.T) { - volumesImage := make(map[string]struct{}) - volumesImage["/test1"] = struct{}{} - volumesImage["/test2"] = struct{}{} - configImage := &Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - PortSpecs: []string{"1111:1111", "2222:2222"}, 
- Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "1111", - Volumes: volumesImage, - } - - volumesUser := make(map[string]struct{}) - volumesUser["/test3"] = struct{}{} - configUser := &Config{ - Dns: []string{"3.3.3.3"}, - PortSpecs: []string{"3333:2222", "3333:3333"}, - Env: []string{"VAR2=3", "VAR3=3"}, - Volumes: volumesUser, - } - - if err := MergeConfig(configUser, configImage); err != nil { - t.Error(err) - } - - if len(configUser.Dns) != 3 { - t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns)) - } - for _, dns := range configUser.Dns { - if dns != "1.1.1.1" && dns != "2.2.2.2" && dns != "3.3.3.3" { - t.Fatalf("Expected 1.1.1.1 or 2.2.2.2 or 3.3.3.3, found %s", dns) - } - } - - if len(configUser.ExposedPorts) != 3 { - t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) - } - for portSpecs := range configUser.ExposedPorts { - if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { - t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) - } - } - if len(configUser.Env) != 3 { - t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) - } - for _, env := range configUser.Env { - if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { - t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) - } - } - - if len(configUser.Volumes) != 3 { - t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) - } - for v := range configUser.Volumes { - if v != "/test1" && v != "/test2" && v != "/test3" { - t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) - } - } - - if configUser.VolumesFrom != "1111" { - t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom) - } - - ports, _, err := parsePortSpecs([]string{"0000"}) - if err != nil { - t.Error(err) - } - configImage2 := &Config{ - ExposedPorts: ports, - } - - if err := MergeConfig(configUser, configImage2); err != nil { - t.Error(err) - } - - if len(configUser.ExposedPorts) != 4 { - t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) - } - for portSpecs := range configUser.ExposedPorts { - if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { - t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs) - } - } - -} - -func TestParseLxcConfOpt(t *testing.T) { - opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} - - for _, o := range opts { - k, v, err := parseLxcOpt(o) - if err != nil { - t.FailNow() - } - if k != "lxc.utsname" { - t.Fail() - } - if v != "docker" { - t.Fail() - } - } -} - -func TestParseNetworkOptsPrivateOnly(t *testing.T) { - ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::80"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "tcp" { - t.Logf("Expected tcp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "80" { - t.Logf("Expected 80 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "" { - t.Logf("Expected \"\" got %s", s.HostPort) - t.Fail() - } - if 
s.HostIp != "192.168.1.100" { - t.Fail() - } - } -} - -func TestParseNetworkOptsPublic(t *testing.T) { - ports, bindings, err := parsePortSpecs([]string{"192.168.1.100:8080:80"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "tcp" { - t.Logf("Expected tcp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "80" { - t.Logf("Expected 80 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "8080" { - t.Logf("Expected 8080 got %s", s.HostPort) - t.Fail() - } - if s.HostIp != "192.168.1.100" { - t.Fail() - } - } -} - -func TestParseNetworkOptsUdp(t *testing.T) { - ports, bindings, err := parsePortSpecs([]string{"192.168.1.100::6000/udp"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "udp" { - t.Logf("Expected udp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "6000" { - t.Logf("Expected 6000 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "" { - t.Logf("Expected \"\" got %s", s.HostPort) - t.Fail() - } - if s.HostIp != "192.168.1.100" { - t.Fail() - } - } -}
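
Editor's note on the new test-directory helper: the relocated unit tests above (for example TestLookupImage in tags_unit_test.go) set up their scratch space through the utils.TestDirectory helper added in utils/utils.go. The sketch below is illustrative only and is not part of this patch; the test name is made up, and it assumes the github.com/dotcloud/docker/utils import path and package layout used elsewhere in this changeset. One subtlety worth calling out, visible in the helper's body: with an empty template the helper removes the directory it just reserved (it only guarantees a unique path), so callers create the directory themselves before using it.

package docker

import (
	"os"
	"testing"

	"github.com/dotcloud/docker/utils"
)

// TestScratchDirExample is a hypothetical test (not in this patch) showing
// the setup/teardown pattern the relocated unit tests follow.
func TestScratchDirExample(t *testing.T) {
	// Passing "" skips the template copy; the helper reserves a unique path
	// but does not leave a directory behind, so we create it ourselves.
	tmp, err := utils.TestDirectory("")
	if err != nil {
		t.Fatal(err)
	}
	// Each test cleans up its own scratch directory.
	defer os.RemoveAll(tmp)

	if err := os.MkdirAll(tmp, 0700); err != nil {
		t.Fatal(err)
	}
	// ... exercise code that needs an isolated root, e.g. mkTestTagStore(tmp, t)
}

A useful property of this helper, per the added GetCallerName code, is that the temporary directory's prefix embeds the calling test's name, so any directories leaked by a failing or interrupted test can be traced back to the test that created them.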